Diffstat (limited to 'drivers/net/bnx2x')
-rw-r--r--  drivers/net/bnx2x/Makefile            |     7
-rw-r--r--  drivers/net/bnx2x/bnx2x.h             |  2080
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c         |  3593
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h         |  1491
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.c         |  2508
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.h         |   203
-rw-r--r--  drivers/net/bnx2x/bnx2x_dump.h        |  1156
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c     |  2389
-rw-r--r--  drivers/net/bnx2x/bnx2x_fw_defs.h     |   410
-rw-r--r--  drivers/net/bnx2x/bnx2x_fw_file_hdr.h |    38
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h         |  5131
-rw-r--r--  drivers/net/bnx2x/bnx2x_init.h        |   567
-rw-r--r--  drivers/net/bnx2x/bnx2x_init_ops.h    |   912
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c        | 12472
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.h        |   493
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c        | 11624
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h         |  7177
-rw-r--r--  drivers/net/bnx2x/bnx2x_sp.c          |  5692
-rw-r--r--  drivers/net/bnx2x/bnx2x_sp.h          |  1297
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c       |  1599
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.h       |   381
21 files changed, 61220 insertions(+), 0 deletions(-)
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
new file mode 100644
index 00000000000..48fbdd48f88
--- /dev/null
+++ b/drivers/net/bnx2x/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for Broadcom 10-Gigabit Ethernet driver
3#
4
5obj-$(CONFIG_BNX2X) += bnx2x.o
6
7bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
new file mode 100644
index 00000000000..9a7eb3b36cf
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -0,0 +1,2080 @@
1/* bnx2x.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 */
13
14#ifndef BNX2X_H
15#define BNX2X_H
16#include <linux/netdevice.h>
17#include <linux/dma-mapping.h>
18#include <linux/types.h>
19
20/* compilation time flags */
21
22/* define this to make the driver freeze on error to allow getting debug info
23 * (you will need to reboot afterwards) */
24/* #define BNX2X_STOP_ON_ERROR */
25
26#define DRV_MODULE_VERSION "1.70.00-0"
27#define DRV_MODULE_RELDATE "2011/06/13"
28#define BNX2X_BC_VER 0x040200
29
30#if defined(CONFIG_DCB)
31#define BCM_DCBNL
32#endif
33#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
34#define BCM_CNIC 1
35#include "../cnic_if.h"
36#endif
37
38#ifdef BCM_CNIC
39#define BNX2X_MIN_MSIX_VEC_CNT 3
40#define BNX2X_MSIX_VEC_FP_START 2
41#else
42#define BNX2X_MIN_MSIX_VEC_CNT 2
43#define BNX2X_MSIX_VEC_FP_START 1
44#endif
45
46#include <linux/mdio.h>
47
48#include "bnx2x_reg.h"
49#include "bnx2x_fw_defs.h"
50#include "bnx2x_hsi.h"
51#include "bnx2x_link.h"
52#include "bnx2x_sp.h"
53#include "bnx2x_dcb.h"
54#include "bnx2x_stats.h"
55
56/* error/debug prints */
57
58#define DRV_MODULE_NAME "bnx2x"
59
60/* for messages that are currently off */
61#define BNX2X_MSG_OFF 0
62#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */
63#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */
64#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */
65#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */
66#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */
67#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */
68
69#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */
70
71/* regular debug print */
72#define DP(__mask, __fmt, __args...) \
73do { \
74 if (bp->msg_enable & (__mask)) \
75 printk(DP_LEVEL "[%s:%d(%s)]" __fmt, \
76 __func__, __LINE__, \
77 bp->dev ? (bp->dev->name) : "?", \
78 ##__args); \
79} while (0)
80
81#define DP_CONT(__mask, __fmt, __args...) \
82do { \
83 if (bp->msg_enable & (__mask)) \
84 pr_cont(__fmt, ##__args); \
85} while (0)
86
87/* errors debug print */
88#define BNX2X_DBG_ERR(__fmt, __args...) \
89do { \
90 if (netif_msg_probe(bp)) \
91 pr_err("[%s:%d(%s)]" __fmt, \
92 __func__, __LINE__, \
93 bp->dev ? (bp->dev->name) : "?", \
94 ##__args); \
95} while (0)
96
97/* for errors (never masked) */
98#define BNX2X_ERR(__fmt, __args...) \
99do { \
100 pr_err("[%s:%d(%s)]" __fmt, \
101 __func__, __LINE__, \
102 bp->dev ? (bp->dev->name) : "?", \
103 ##__args); \
104 } while (0)
105
106#define BNX2X_ERROR(__fmt, __args...) do { \
107 pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \
108 } while (0)
109
110
111/* before we have a dev->name use dev_info() */
112#define BNX2X_DEV_INFO(__fmt, __args...) \
113do { \
114 if (netif_msg_probe(bp)) \
115 dev_info(&bp->pdev->dev, __fmt, ##__args); \
116} while (0)
117
118#define BNX2X_MAC_FMT "%pM"
119#define BNX2X_MAC_PRN_LIST(mac) (mac)
120
121
122#ifdef BNX2X_STOP_ON_ERROR
123void bnx2x_int_disable(struct bnx2x *bp);
124#define bnx2x_panic() do { \
125 bp->panic = 1; \
126 BNX2X_ERR("driver assert\n"); \
127 bnx2x_int_disable(bp); \
128 bnx2x_panic_dump(bp); \
129 } while (0)
130#else
131#define bnx2x_panic() do { \
132 bp->panic = 1; \
133 BNX2X_ERR("driver assert\n"); \
134 bnx2x_panic_dump(bp); \
135 } while (0)
136#endif
137
138#define bnx2x_mc_addr(ha) ((ha)->addr)
139#define bnx2x_uc_addr(ha) ((ha)->addr)
140
141#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
142#define U64_HI(x) (u32)(((u64)(x)) >> 32)
143#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
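
/* Usage sketch (illustrative, not part of the original header): a 64-bit DMA
 * address is split into the 32-bit hi/lo halves that the HW BD fields expect,
 * and HILO_U64() recombines them; cf. BD_UNMAP_ADDR() further below.
 *
 *	bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 *	bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 *	...
 *	dma_addr_t addr = HILO_U64(U64_HI(mapping), U64_LO(mapping));
 */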
144
145
146#define REG_ADDR(bp, offset) ((bp->regview) + (offset))
147
148#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
149#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
150#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset))
151
152#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
153#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
154#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset))
155
156#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset)
157#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val)
158
159#define REG_RD_DMAE(bp, offset, valp, len32) \
160 do { \
161 bnx2x_read_dmae(bp, offset, len32);\
162 memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
163 } while (0)
164
165#define REG_WR_DMAE(bp, offset, valp, len32) \
166 do { \
167 memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
168 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
169 offset, len32); \
170 } while (0)
171
172#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
173 REG_WR_DMAE(bp, offset, valp, len32)
174
175#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
176 do { \
177 memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
178 bnx2x_write_big_buf_wb(bp, addr, len32); \
179 } while (0)
180
181#define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \
182 offsetof(struct shmem_region, field))
183#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
184#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
185
186#define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \
187 offsetof(struct shmem2_region, field))
188#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
189#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
190#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
191 offsetof(struct mf_cfg, field))
192#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \
193 offsetof(struct mf2_cfg, field))
194
195#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
196#define MF_CFG_WR(bp, field, val) REG_WR(bp,\
197 MF_CFG_ADDR(bp, field), (val))
198#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field))
199
200#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \
201 (SHMEM2_RD((bp), size) > \
202 offsetof(struct shmem2_region, field)))
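
/* Usage sketch (illustrative): newer shmem2 fields are only valid when the
 * MFW is recent enough to provide them, so reads are guarded with
 * SHMEM2_HAS(); "new_field" below is a hypothetical field name.
 *
 *	if (SHMEM2_HAS(bp, new_field))
 *		val = SHMEM2_RD(bp, new_field);
 */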
203
204#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
205#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
206
207/* SP SB indices */
208
209/* General SP events - stats query, cfc delete, etc */
210#define HC_SP_INDEX_ETH_DEF_CONS 3
211
212/* EQ completions */
213#define HC_SP_INDEX_EQ_CONS 7
214
215/* FCoE L2 connection completions */
216#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6
217#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4
218/* iSCSI L2 */
219#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
220#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
221
222/* Special clients parameters */
223
224/* SB indices */
225/* FCoE L2 */
226#define BNX2X_FCOE_L2_RX_INDEX \
227 (&bp->def_status_blk->sp_sb.\
228 index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
229
230#define BNX2X_FCOE_L2_TX_INDEX \
231 (&bp->def_status_blk->sp_sb.\
232 index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
233
234/**
235 * CIDs and CLIDs:
236 * The CLIDs below are the CLIDs for func 0; the CLIDs for the other
237 * functions are calculated by the formula:
238 *
239 * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
240 *
241 */
242enum {
243 BNX2X_ISCSI_ETH_CL_ID_IDX,
244 BNX2X_FCOE_ETH_CL_ID_IDX,
245 BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
246};
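
/* Illustrative sketch (an assumption, not driver code): applying the
 * FUNC_N_CLID_X formula above, taking NUM_SPECIAL_CLIENTS to be
 * BNX2X_MAX_CNIC_ETH_CL_ID_IDX (i.e. 2 special clients per function).
 */
static inline u8 bnx2x_example_cnic_cl_id(u8 func, u8 func_0_clid)
{
	/* FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X */
	return func * BNX2X_MAX_CNIC_ETH_CL_ID_IDX + func_0_clid;
}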
247
248#define BNX2X_CNIC_START_ETH_CID 48
249enum {
250 /* iSCSI L2 */
251 BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID,
252 /* FCoE L2 */
253 BNX2X_FCOE_ETH_CID,
254};
255
256/** Additional rings budgeting */
257#ifdef BCM_CNIC
258#define CNIC_PRESENT 1
259#define FCOE_PRESENT 1
260#else
261#define CNIC_PRESENT 0
262#define FCOE_PRESENT 0
263#endif /* BCM_CNIC */
264#define NON_ETH_CONTEXT_USE (FCOE_PRESENT)
265
266#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
267 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
268
269#define SM_RX_ID 0
270#define SM_TX_ID 1
271
272/* defines for multiple tx priority indices */
273#define FIRST_TX_ONLY_COS_INDEX 1
274#define FIRST_TX_COS_INDEX 0
275
276/* defines for decoding the fastpath index and the cos index out of the
277 * transmission queue index
278 */
279#define MAX_TXQS_PER_COS FP_SB_MAX_E1x
280
281#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS)
282#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
283
284/* rules for calculating the cids of tx-only connections */
285#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS)
286#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS)
287
288/* fp index inside class of service range */
289#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS)
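
/* Worked example (illustrative, assuming MAX_TXQS_PER_COS == 16): tx queue
 * index 19 decodes to fastpath index TXQ_TO_FP(19) == 19 % 16 == 3 and cos
 * TXQ_TO_COS(19) == 19 / 16 == 1; FP_COS_TO_TXQ() maps fp 3 / cos 1 back to
 * 3 + 1 * 16 == 19.
 */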
290
291/*
292 * 0..15 eth cos0
293 * 16..31 eth cos1 if applicable
294 * 32..47 eth cos2 if applicable
295 * fcoe queue follows eth queues (16, 32, 48 depending on cos)
296 */
297#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos)
298#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
299
300/* fast path */
301struct sw_rx_bd {
302 struct sk_buff *skb;
303 DEFINE_DMA_UNMAP_ADDR(mapping);
304};
305
306struct sw_tx_bd {
307 struct sk_buff *skb;
308 u16 first_bd;
309 u8 flags;
310/* Set on the first BD descriptor when there is a split BD */
311#define BNX2X_TSO_SPLIT_BD (1<<0)
312};
313
314struct sw_rx_page {
315 struct page *page;
316 DEFINE_DMA_UNMAP_ADDR(mapping);
317};
318
319union db_prod {
320 struct doorbell_set_prod data;
321 u32 raw;
322};
323
324/* dropless fc FW/HW related params */
325#define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512)
326#define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \
327 ETH_MAX_AGGREGATION_QUEUES_E1 :\
328 ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
329#define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
330#define FW_PREFETCH_CNT 16
331#define DROPLESS_FC_HEADROOM 100
332
333/* MC hsi */
334#define BCM_PAGE_SHIFT 12
335#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT)
336#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1))
337#define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
338
339#define PAGES_PER_SGE_SHIFT 0
340#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
341#define SGE_PAGE_SIZE PAGE_SIZE
342#define SGE_PAGE_SHIFT PAGE_SHIFT
343#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
344
345/* SGE ring related macros */
346#define NUM_RX_SGE_PAGES 2
347#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
348#define NEXT_PAGE_SGE_DESC_CNT 2
349#define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
350/* RX_SGE_CNT is promised to be a power of 2 */
351#define RX_SGE_MASK (RX_SGE_CNT - 1)
352#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
353#define MAX_RX_SGE (NUM_RX_SGE - 1)
354#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
355 (MAX_RX_SGE_CNT - 1)) ? \
356 (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
357 (x) + 1)
358#define RX_SGE(x) ((x) & MAX_RX_SGE)
359
360/*
361 * The number of required SGEs is the sum of two terms:
362 * 1. The number of possibly open aggregations (the next packet for
363 *    these aggregations will probably consume an SGE immediately)
364 * 2. The rest of the BRB blocks divided by 2 (a block will consume a new
365 *    SGE only after placement on a BD for a new TPA aggregation)
366 *
367 * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
368 */
369#define NUM_SGE_REQ (MAX_AGG_QS(bp) + \
370 (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
371#define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
372 MAX_RX_SGE_CNT)
373#define SGE_TH_LO(bp) (NUM_SGE_REQ + \
374 NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
375#define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
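
/* Worked arithmetic (illustrative, assuming an E1H/E2 chip with
 * BRB_SIZE == 512, MAX_AGG_QS == 64 and sizeof(struct eth_rx_sge) == 8,
 * i.e. MAX_RX_SGE_CNT == 510):
 *	NUM_SGE_REQ    = 64 + (512 - 64) / 2	= 288
 *	NUM_SGE_PG_REQ = DIV_ROUND_UP(288, 510)	= 1
 *	SGE_TH_LO      = 288 + 1 * 2		= 290
 *	SGE_TH_HI      = 290 + 100		= 390
 */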
376
377/* Manipulate a bit vector defined as an array of u64 */
378
379/* Number of bits in one sge_mask array element */
380#define BIT_VEC64_ELEM_SZ 64
381#define BIT_VEC64_ELEM_SHIFT 6
382#define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1)
383
384
385#define __BIT_VEC64_SET_BIT(el, bit) \
386 do { \
387 el = ((el) | ((u64)0x1 << (bit))); \
388 } while (0)
389
390#define __BIT_VEC64_CLEAR_BIT(el, bit) \
391 do { \
392 el = ((el) & (~((u64)0x1 << (bit)))); \
393 } while (0)
394
395
396#define BIT_VEC64_SET_BIT(vec64, idx) \
397 __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
398 (idx) & BIT_VEC64_ELEM_MASK)
399
400#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
401 __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
402 (idx) & BIT_VEC64_ELEM_MASK)
403
404#define BIT_VEC64_TEST_BIT(vec64, idx) \
405 (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
406 ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
407
408/* Creates a bitmask of all ones in the less significant bits.
409 idx - index of the most significant bit in the created mask */
410#define BIT_VEC64_ONES_MASK(idx) \
411 (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
412#define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0))
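
/* Usage sketch (illustrative): setting and testing SGE index 70 in a mask
 * such as fp->sge_mask. Index 70 lands in element 70 >> 6 == 1 at bit
 * 70 & 63 == 6.
 */
static inline int bnx2x_example_sge_mask(u64 *sge_mask)
{
	BIT_VEC64_SET_BIT(sge_mask, 70);	/* sge_mask[1] |= 1ULL << 6 */
	return BIT_VEC64_TEST_BIT(sge_mask, 70);	/* -> 1 */
}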
413
414/*******************************************************/
415
416
417
418/* Number of u64 elements in SGE mask array */
419#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
420 BIT_VEC64_ELEM_SZ)
421#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
422#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
423
424union host_hc_status_block {
425 /* pointer to fp status block e1x */
426 struct host_hc_status_block_e1x *e1x_sb;
427 /* pointer to fp status block e2 */
428 struct host_hc_status_block_e2 *e2_sb;
429};
430
431struct bnx2x_agg_info {
432 /*
433 * The first aggregation buffer is an skb; the following ones are pages.
434 * We will preallocate the skbs for each aggregation when
435 * we open the interface and will replace the BD at the consumer
436 * with this one when we receive the TPA_START CQE in order to
437 * keep the Rx BD ring consistent.
438 */
439 struct sw_rx_bd first_buf;
440 u8 tpa_state;
441#define BNX2X_TPA_START 1
442#define BNX2X_TPA_STOP 2
443#define BNX2X_TPA_ERROR 3
444 u8 placement_offset;
445 u16 parsing_flags;
446 u16 vlan_tag;
447 u16 len_on_bd;
448};
449
450#define Q_STATS_OFFSET32(stat_name) \
451 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
452
453struct bnx2x_fp_txdata {
454
455 struct sw_tx_bd *tx_buf_ring;
456
457 union eth_tx_bd_types *tx_desc_ring;
458 dma_addr_t tx_desc_mapping;
459
460 u32 cid;
461
462 union db_prod tx_db;
463
464 u16 tx_pkt_prod;
465 u16 tx_pkt_cons;
466 u16 tx_bd_prod;
467 u16 tx_bd_cons;
468
469 unsigned long tx_pkt;
470
471 __le16 *tx_cons_sb;
472
473 int txq_index;
474};
475
476struct bnx2x_fastpath {
477 struct bnx2x *bp; /* parent */
478
479#define BNX2X_NAPI_WEIGHT 128
480 struct napi_struct napi;
481 union host_hc_status_block status_blk;
482 /* chip-independent shortcuts into the sb structure */
483 __le16 *sb_index_values;
484 __le16 *sb_running_index;
485 /* chip-independent shortcut into rx_prods_offset memory */
486 u32 ustorm_rx_prods_offset;
487
488 u32 rx_buf_size;
489
490 dma_addr_t status_blk_mapping;
491
492 u8 max_cos; /* actual number of active tx coses */
493 struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];
494
495 struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
496 struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
497
498 struct eth_rx_bd *rx_desc_ring;
499 dma_addr_t rx_desc_mapping;
500
501 union eth_rx_cqe *rx_comp_ring;
502 dma_addr_t rx_comp_mapping;
503
504 /* SGE ring */
505 struct eth_rx_sge *rx_sge_ring;
506 dma_addr_t rx_sge_mapping;
507
508 u64 sge_mask[RX_SGE_MASK_LEN];
509
510 u32 cid;
511
512 __le16 fp_hc_idx;
513
514 u8 index; /* number in fp array */
515 u8 cl_id; /* eth client id */
516 u8 cl_qzone_id;
517 u8 fw_sb_id; /* status block number in FW */
518 u8 igu_sb_id; /* status block number in HW */
519
520 u16 rx_bd_prod;
521 u16 rx_bd_cons;
522 u16 rx_comp_prod;
523 u16 rx_comp_cons;
524 u16 rx_sge_prod;
525 /* The last maximal completed SGE */
526 u16 last_max_sge;
527 __le16 *rx_cons_sb;
528 unsigned long rx_pkt,
529 rx_calls;
530
531 /* TPA related */
532 struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
533 u8 disable_tpa;
534#ifdef BNX2X_STOP_ON_ERROR
535 u64 tpa_queue_used;
536#endif
537
538 struct tstorm_per_queue_stats old_tclient;
539 struct ustorm_per_queue_stats old_uclient;
540 struct xstorm_per_queue_stats old_xclient;
541 struct bnx2x_eth_q_stats eth_q_stats;
542
543 /* The size is calculated using the following:
544 the size of the name field from the netdev structure +
545 4 ('-Xx-' string) +
546 4 (for the digits and to make it DWORD aligned) */
547#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
548 char name[FP_NAME_SIZE];
549
550 /* MACs object */
551 struct bnx2x_vlan_mac_obj mac_obj;
552
553 /* Queue State object */
554 struct bnx2x_queue_sp_obj q_obj;
555
556};
557
558#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
559
560/* Use 2500 as a mini-jumbo MTU for FCoE */
561#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
562
563/* FCoE L2 `fastpath' entry is right after the eth entries */
564#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
565#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
566#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
567#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
568 txdata[FIRST_TX_COS_INDEX].var)
569
570
571#define IS_ETH_FP(fp) (fp->index < \
572 BNX2X_NUM_ETH_QUEUES(fp->bp))
573#ifdef BCM_CNIC
574#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
575#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
576#else
577#define IS_FCOE_FP(fp) false
578#define IS_FCOE_IDX(idx) false
579#endif
580
581
582/* MC hsi */
583#define MAX_FETCH_BD 13 /* HW max BDs per packet */
584#define RX_COPY_THRESH 92
585
586#define NUM_TX_RINGS 16
587#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
588#define NEXT_PAGE_TX_DESC_CNT 1
589#define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
590#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
591#define MAX_TX_BD (NUM_TX_BD - 1)
592#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
593#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
594 (MAX_TX_DESC_CNT - 1)) ? \
595 (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
596 (x) + 1)
597#define TX_BD(x) ((x) & MAX_TX_BD)
598#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
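
/* Worked example (illustrative, assuming sizeof(union eth_tx_bd_types) == 16,
 * so TX_DESC_CNT == 256 and MAX_TX_DESC_CNT == 255): within a page the index
 * advances by 1, but at page offset 254 (the last usable BD) NEXT_TX_IDX()
 * returns x + 2, skipping the single next-page pointer BD at offset 255.
 */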
599
600/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
601#define NUM_RX_RINGS 8
602#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
603#define NEXT_PAGE_RX_DESC_CNT 2
604#define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
605#define RX_DESC_MASK (RX_DESC_CNT - 1)
606#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
607#define MAX_RX_BD (NUM_RX_BD - 1)
608#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
609
610/* dropless fc calculations for BDs
611 *
612 * The number of BDs should equal the number of buffers in the BRB:
613 * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
614 * "next" elements on each page
615 */
616#define NUM_BD_REQ BRB_SIZE(bp)
617#define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
618 MAX_RX_DESC_CNT)
619#define BD_TH_LO(bp) (NUM_BD_REQ + \
620 NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
621 FW_DROP_LEVEL(bp))
622#define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
623
624#define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
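
/* Worked arithmetic (illustrative, assuming a non-E3 chip with
 * BRB_SIZE == 512, MAX_AGG_QS == 64 (so FW_DROP_LEVEL == 3 + 8 + 64 == 75)
 * and sizeof(struct eth_rx_bd) == 8, i.e. MAX_RX_DESC_CNT == 510):
 *	NUM_BD_REQ    = 512
 *	NUM_BD_PG_REQ = DIV_ROUND_UP(512, 510)	= 2
 *	BD_TH_LO      = 512 + 2 * 2 + 75	= 591
 *	BD_TH_HI      = 591 + 100		= 691
 *	MIN_RX_AVAIL  = 691 + 128		= 819	(dropless_fc on)
 */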
625
626#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \
627 ETH_MIN_RX_CQES_WITH_TPA_E1 : \
628 ETH_MIN_RX_CQES_WITH_TPA_E1H_E2)
629#define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA
630#define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL))
631#define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\
632 MIN_RX_AVAIL))
633
634#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
635 (MAX_RX_DESC_CNT - 1)) ? \
636 (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
637 (x) + 1)
638#define RX_BD(x) ((x) & MAX_RX_BD)
639
640/*
641 * As long as a CQE is X times bigger than a BD entry we have to allocate X
642 * times more pages for the CQ ring in order to keep it balanced with the BD ring
643 */
644#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
645#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL)
646#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
647#define NEXT_PAGE_RCQ_DESC_CNT 1
648#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
649#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
650#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
651#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
652#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
653 (MAX_RCQ_DESC_CNT - 1)) ? \
654 (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
655 (x) + 1)
656#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
657
658/* dropless fc calculations for RCQs
659 *
660 * The number of RCQs should equal the number of buffers in the BRB:
661 * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
662 * "next" elements on each page
663 */
664#define NUM_RCQ_REQ BRB_SIZE(bp)
665#define NUM_RCQ_PG_REQ ((NUM_RCQ_REQ + MAX_RCQ_DESC_CNT - 1) / \
666 MAX_RCQ_DESC_CNT)
667#define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \
668 NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
669 FW_DROP_LEVEL(bp))
670#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
671
672
673/* This is needed for determining last_max */
674#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
675#define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b))
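
/* Worked example (illustrative): the signed subtraction makes the delta of
 * two cyclic 16-bit indices wrap-safe, e.g. for a producer at 0x0002 and a
 * last_max of 0xfff0, SUB_S16(0x0002, 0xfff0) == 2 - (-16) == 18 > 0, so the
 * producer is correctly seen as 18 entries ahead despite the wraparound.
 */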
676
677
678#define BNX2X_SWCID_SHIFT 17
679#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1)
680
681/* used on a CID received from the HW */
682#define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK)
683#define CQE_CMD(x) (le32_to_cpu(x) >> \
684 COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
685
686#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \
687 le32_to_cpu((bd)->addr_lo))
688#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
689
690#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
691#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
692#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
693#error "Min DB doorbell stride is 8"
694#endif
695#define DPM_TRIGER_TYPE 0x40
696#define DOORBELL(bp, cid, val) \
697 do { \
698 writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
699 DPM_TRIGER_TYPE); \
700 } while (0)
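
/* Usage sketch (illustrative): the TX path publishes a new producer by
 * packing it into the db_prod union and writing the raw value, e.g.
 *
 *	txdata->tx_db.data.prod = txdata->tx_bd_prod;
 *	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
 */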
701
702
703/* TX CSUM helpers */
704#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \
705 skb->csum_offset)
706#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
707 skb->csum_offset))
708
709#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
710
711#define XMIT_PLAIN 0
712#define XMIT_CSUM_V4 0x1
713#define XMIT_CSUM_V6 0x2
714#define XMIT_CSUM_TCP 0x4
715#define XMIT_GSO_V4 0x8
716#define XMIT_GSO_V6 0x10
717
718#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6)
719#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6)
720
721
722/* stuff added to make the code fit 80Col */
723#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
724#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
725#define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
726#define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
727#define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
728
729#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
730
731#define BNX2X_IP_CSUM_ERR(cqe) \
732 (!((cqe)->fast_path_cqe.status_flags & \
733 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
734 ((cqe)->fast_path_cqe.type_error_flags & \
735 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
736
737#define BNX2X_L4_CSUM_ERR(cqe) \
738 (!((cqe)->fast_path_cqe.status_flags & \
739 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
740 ((cqe)->fast_path_cqe.type_error_flags & \
741 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
742
743#define BNX2X_RX_CSUM_OK(cqe) \
744 (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
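
/* Usage sketch (illustrative): the RX path marks a packet's checksum as
 * verified only when neither the IP nor the L4 checksum failed, e.g.
 *
 *	if (likely(BNX2X_RX_CSUM_OK(cqe)))
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 */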
745
746#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
747 (((le16_to_cpu(flags) & \
748 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
749 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \
750 == PRS_FLAG_OVERETH_IPV4)
751#define BNX2X_RX_SUM_FIX(cqe) \
752 BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
753
754
755#define FP_USB_FUNC_OFF \
756 offsetof(struct cstorm_status_block_u, func)
757#define FP_CSB_FUNC_OFF \
758 offsetof(struct cstorm_status_block_c, func)
759
760#define HC_INDEX_ETH_RX_CQ_CONS 1
761
762#define HC_INDEX_OOO_TX_CQ_CONS 4
763
764#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5
765
766#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6
767
768#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7
769
770#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
771
772#define BNX2X_RX_SB_INDEX \
773 (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
774
775#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0
776
777#define BNX2X_TX_SB_INDEX_COS0 \
778 (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0])
779
780/* end of fast path */
781
782/* common */
783
784struct bnx2x_common {
785
786 u32 chip_id;
787/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
788#define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0)
789
790#define CHIP_NUM(bp) (bp->common.chip_id >> 16)
791#define CHIP_NUM_57710 0x164e
792#define CHIP_NUM_57711 0x164f
793#define CHIP_NUM_57711E 0x1650
794#define CHIP_NUM_57712 0x1662
795#define CHIP_NUM_57712_MF 0x1663
796#define CHIP_NUM_57713 0x1651
797#define CHIP_NUM_57713E 0x1652
798#define CHIP_NUM_57800 0x168a
799#define CHIP_NUM_57800_MF 0x16a5
800#define CHIP_NUM_57810 0x168e
801#define CHIP_NUM_57810_MF 0x16ae
802#define CHIP_NUM_57840 0x168d
803#define CHIP_NUM_57840_MF 0x16ab
804#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
805#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
806#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
807#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
808#define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF)
809#define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800)
810#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
811#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810)
812#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
813#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840)
814#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
815#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
816 CHIP_IS_57711E(bp))
817#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
818 CHIP_IS_57712_MF(bp))
819#define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \
820 CHIP_IS_57800_MF(bp) || \
821 CHIP_IS_57810(bp) || \
822 CHIP_IS_57810_MF(bp) || \
823 CHIP_IS_57840(bp) || \
824 CHIP_IS_57840_MF(bp))
825#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
826#define USES_WARPCORE(bp) (CHIP_IS_E3(bp))
827#define IS_E1H_OFFSET (!CHIP_IS_E1(bp))
828
829#define CHIP_REV_SHIFT 12
830#define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT)
831#define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK)
832#define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT)
833#define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT)
834/* assume maximum 5 revisions */
835#define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000)
836/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
837#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \
838 !(CHIP_REV_VAL(bp) & 0x00001000))
839/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
840#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \
841 (CHIP_REV_VAL(bp) & 0x00001000))
842
843#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
844 ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
845
846#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
847#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
848#define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
849 (CHIP_REV_SHIFT + 1)) \
850 << CHIP_REV_SHIFT)
851#define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? \
852 CHIP_REV_SIM(bp) :\
853 CHIP_REV_VAL(bp))
854#define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \
855 (CHIP_REV(bp) == CHIP_REV_Bx))
856#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
857 (CHIP_REV(bp) == CHIP_REV_Ax))
858
859 int flash_size;
860#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
861#define BNX2X_NVRAM_TIMEOUT_COUNT 30000
862#define BNX2X_NVRAM_PAGE_SIZE 256
863
864 u32 shmem_base;
865 u32 shmem2_base;
866 u32 mf_cfg_base;
867 u32 mf2_cfg_base;
868
869 u32 hw_config;
870
871 u32 bc_ver;
872
873 u8 int_block;
874#define INT_BLOCK_HC 0
875#define INT_BLOCK_IGU 1
876#define INT_BLOCK_MODE_NORMAL 0
877#define INT_BLOCK_MODE_BW_COMP 2
878#define CHIP_INT_MODE_IS_NBC(bp) \
879 (!CHIP_IS_E1x(bp) && \
880 !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
881#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
882
883 u8 chip_port_mode;
884#define CHIP_4_PORT_MODE 0x0
885#define CHIP_2_PORT_MODE 0x1
886#define CHIP_PORT_MODE_NONE 0x2
887#define CHIP_MODE(bp) (bp->common.chip_port_mode)
888#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
889};
890
891/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
892#define BNX2X_IGU_STAS_MSG_VF_CNT 64
893#define BNX2X_IGU_STAS_MSG_PF_CNT 4
894
895/* end of common */
896
897/* port */
898
899struct bnx2x_port {
900 u32 pmf;
901
902 u32 link_config[LINK_CONFIG_SIZE];
903
904 u32 supported[LINK_CONFIG_SIZE];
905/* link settings - missing defines */
906#define SUPPORTED_2500baseX_Full (1 << 15)
907
908 u32 advertising[LINK_CONFIG_SIZE];
909/* link settings - missing defines */
910#define ADVERTISED_2500baseX_Full (1 << 15)
911
912 u32 phy_addr;
913
914 /* used to synchronize phy accesses */
915 struct mutex phy_mutex;
916 int need_hw_lock;
917
918 u32 port_stx;
919
920 struct nig_stats old_nig_stats;
921};
922
923/* end of port */
924
925#define STATS_OFFSET32(stat_name) \
926 (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
927
928/* slow path */
929
930/* slow path work-queue */
931extern struct workqueue_struct *bnx2x_wq;
932
933#define BNX2X_MAX_NUM_OF_VFS 64
934#define BNX2X_VF_ID_INVALID 0xFF
935
936/*
937 * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
938 * controlled by the number of fast-path status blocks supported by the
939 * device (HW/FW). Each fast-path status block (FP-SB), aka non-default
940 * status block, represents an independent interrupt context that can
941 * serve a regular L2 networking queue. However, special L2 queues such
942 * as the FCoE queue do not require an FP-SB, and other components like
943 * the CNIC may consume FP-SBs, reducing the number of possible L2 queues.
944 *
945 * If the maximum number of FP-SBs available is X then:
946 * a. If CNIC is supported it consumes 1 FP-SB, thus the max number of
947 *    regular L2 queues is Y = X - 1
948 * b. In MF mode the actual number of L2 queues is Y = (X - 1) / MF_factor
949 * c. If the FCoE L2 queue is supported, the actual number of L2 queues
950 *    is Y + 1
951 * d. The number of irqs (MSIX vectors) is either Y + 1 (one extra for
952 *    slow-path interrupts) or Y + 2 if CNIC is supported (one additional
953 *    FP interrupt context for the CNIC).
954 * e. The number of HW contexts (CID count) is always X or X + 1 if the
955 *    FCoE L2 queue is supported. The CID for the FCoE L2 queue is always X.
956 */
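
/* Worked example (illustrative): with X == 16 FP-SBs (FP_SB_MAX_E1x) and
 * CNIC supported, Y == 15 regular L2 queues are possible; with the FCoE L2
 * queue that makes 16 L2 queues, 15 + 2 == 17 MSIX vectors (slow-path plus
 * the CNIC FP context), and 17 HW contexts with the FCoE CID fixed at 16.
 */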
957
958/* fast-path interrupt contexts E1x */
959#define FP_SB_MAX_E1x 16
960/* fast-path interrupt contexts E2 */
961#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2
962
963union cdu_context {
964 struct eth_context eth;
965 char pad[1024];
966};
967
968/* CDU host DB constants */
969#define CDU_ILT_PAGE_SZ_HW 3
970#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
971#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
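
/* Worked arithmetic (illustrative): CDU_ILT_PAGE_SZ == 8192 << 3 == 64K, and
 * union cdu_context is padded to 1024 bytes, so ILT_PAGE_CIDS == 64 CIDs per
 * CDU ILT page (assuming struct eth_context fits within the 1024-byte pad).
 */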
972
973#ifdef BCM_CNIC
974#define CNIC_ISCSI_CID_MAX 256
975#define CNIC_FCOE_CID_MAX 2048
976#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
977#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
978#endif
979
980#define QM_ILT_PAGE_SZ_HW 0
981#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
982#define QM_CID_ROUND 1024
983
984#ifdef BCM_CNIC
985/* TM (timers) host DB constants */
986#define TM_ILT_PAGE_SZ_HW 0
987#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
988/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
989#define TM_CONN_NUM 1024
990#define TM_ILT_SZ (8 * TM_CONN_NUM)
991#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
992
993/* SRC (Searcher) host DB constants */
994#define SRC_ILT_PAGE_SZ_HW 0
995#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */
996#define SRC_HASH_BITS 10
997#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */
998#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM)
999#define SRC_T2_SZ SRC_ILT_SZ
1000#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
1001
1002#endif
1003
1004#define MAX_DMAE_C 8
1005
1006/* DMA memory not used in fastpath */
1007struct bnx2x_slowpath {
1008 union {
1009 struct mac_configuration_cmd e1x;
1010 struct eth_classify_rules_ramrod_data e2;
1011 } mac_rdata;
1012
1013
1014 union {
1015 struct tstorm_eth_mac_filter_config e1x;
1016 struct eth_filter_rules_ramrod_data e2;
1017 } rx_mode_rdata;
1018
1019 union {
1020 struct mac_configuration_cmd e1;
1021 struct eth_multicast_rules_ramrod_data e2;
1022 } mcast_rdata;
1023
1024 struct eth_rss_update_ramrod_data rss_rdata;
1025
1026 /* Queue State related ramrods are always sent under rtnl_lock */
1027 union {
1028 struct client_init_ramrod_data init_data;
1029 struct client_update_ramrod_data update_data;
1030 } q_rdata;
1031
1032 union {
1033 struct function_start_data func_start;
1034 /* pfc configuration for DCBX ramrod */
1035 struct flow_control_configuration pfc_config;
1036 } func_rdata;
1037
1038 /* used by dmae command executor */
1039 struct dmae_command dmae[MAX_DMAE_C];
1040
1041 u32 stats_comp;
1042 union mac_stats mac_stats;
1043 struct nig_stats nig_stats;
1044 struct host_port_stats port_stats;
1045 struct host_func_stats func_stats;
1046 struct host_func_stats func_stats_base;
1047
1048 u32 wb_comp;
1049 u32 wb_data[4];
1050};
1051
1052#define bnx2x_sp(bp, var) (&bp->slowpath->var)
1053#define bnx2x_sp_mapping(bp, var) \
1054 (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
1055
1056
1057/* attn group wiring */
1058#define MAX_DYNAMIC_ATTN_GRPS 8
1059
1060struct attn_route {
1061 u32 sig[5];
1062};
1063
1064struct iro {
1065 u32 base;
1066 u16 m1;
1067 u16 m2;
1068 u16 m3;
1069 u16 size;
1070};
1071
1072struct hw_context {
1073 union cdu_context *vcxt;
1074 dma_addr_t cxt_mapping;
1075 size_t size;
1076};
1077
1078/* forward */
1079struct bnx2x_ilt;
1080
1081
1082enum bnx2x_recovery_state {
1083 BNX2X_RECOVERY_DONE,
1084 BNX2X_RECOVERY_INIT,
1085 BNX2X_RECOVERY_WAIT,
1086 BNX2X_RECOVERY_FAILED
1087};
1088
1089/*
1090 * Event queue (EQ or event ring) MC hsi
1091 * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be a power of 2
1092 */
1093#define NUM_EQ_PAGES 1
1094#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem))
1095#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)
1096#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
1097#define EQ_DESC_MASK (NUM_EQ_DESC - 1)
1098#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
1099
1100/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
1101#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \
1102 (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
1103
1104/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
1105#define EQ_DESC(x) ((x) & EQ_DESC_MASK)
1106
1107#define BNX2X_EQ_INDEX \
1108 (&bp->def_status_blk->sp_sb.\
1109 index_values[HC_SP_INDEX_EQ_CONS])
1110
1111/* This is the data that will be used to create a link report message.
1112 * We will keep the data used for the last link report in order
1113 * to prevent reporting the same link parameters twice.
1114 */
1115struct bnx2x_link_report_data {
1116 u16 line_speed; /* Effective line speed */
1117 unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */
1118};
1119
1120enum {
1121 BNX2X_LINK_REPORT_FD, /* Full DUPLEX */
1122 BNX2X_LINK_REPORT_LINK_DOWN,
1123 BNX2X_LINK_REPORT_RX_FC_ON,
1124 BNX2X_LINK_REPORT_TX_FC_ON,
1125};
1126
1127enum {
1128 BNX2X_PORT_QUERY_IDX,
1129 BNX2X_PF_QUERY_IDX,
1130 BNX2X_FIRST_QUEUE_QUERY_IDX,
1131};
1132
1133struct bnx2x_fw_stats_req {
1134 struct stats_query_header hdr;
1135 struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
1136};
1137
1138struct bnx2x_fw_stats_data {
1139 struct stats_counter storm_counters;
1140 struct per_port_stats port;
1141 struct per_pf_stats pf;
1142 struct per_queue_stats queue_stats[1];
1143};
1144
1145/* Public slow path states */
1146enum {
1147 BNX2X_SP_RTNL_SETUP_TC,
1148 BNX2X_SP_RTNL_TX_TIMEOUT,
1149};
1150
1151
1152struct bnx2x {
1153 /* Fields used in the tx and intr/napi performance paths
1154 * are grouped together in the beginning of the structure
1155 */
1156 struct bnx2x_fastpath *fp;
1157 void __iomem *regview;
1158 void __iomem *doorbells;
1159 u16 db_size;
1160
1161 u8 pf_num; /* absolute PF number */
1162 u8 pfid; /* per-path PF number */
1163 int base_fw_ndsb;
1164#define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1))
1165#define BP_PORT(bp) (bp->pfid & 1)
1166#define BP_FUNC(bp) (bp->pfid)
1167#define BP_ABS_FUNC(bp) (bp->pf_num)
1168#define BP_VN(bp) ((bp)->pfid >> 1)
1169#define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
1170#define BP_L_ID(bp) (BP_VN(bp) << 2)
1171#define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\
1172 (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
1173#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
1174
1175 struct net_device *dev;
1176 struct pci_dev *pdev;
1177
1178 const struct iro *iro_arr;
1179#define IRO (bp->iro_arr)
1180
1181 enum bnx2x_recovery_state recovery_state;
1182 int is_leader;
1183 struct msix_entry *msix_table;
1184
1185 int tx_ring_size;
1186
1187/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
1188#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
1189#define ETH_MIN_PACKET_SIZE 60
1190#define ETH_MAX_PACKET_SIZE 1500
1191#define ETH_MAX_JUMBO_PACKET_SIZE 9600
1192
1193 /* Max supported alignment is 256 (8 shift) */
1194#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
1195 L1_CACHE_SHIFT : 8)
1196 /* The FW uses 2-cache-line alignment for the packet start and size */
1197#define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT)
1198#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
1199
1200 struct host_sp_status_block *def_status_blk;
1201#define DEF_SB_IGU_ID 16
1202#define DEF_SB_ID HC_SP_SB_ID
1203 __le16 def_idx;
1204 __le16 def_att_idx;
1205 u32 attn_state;
1206 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
1207
1208 /* slow path ring */
1209 struct eth_spe *spq;
1210 dma_addr_t spq_mapping;
1211 u16 spq_prod_idx;
1212 struct eth_spe *spq_prod_bd;
1213 struct eth_spe *spq_last_bd;
1214 __le16 *dsb_sp_prod;
1215 atomic_t cq_spq_left; /* ETH_XXX ramrods credit */
1216 /* used to synchronize spq accesses */
1217 spinlock_t spq_lock;
1218
1219 /* event queue */
1220 union event_ring_elem *eq_ring;
1221 dma_addr_t eq_mapping;
1222 u16 eq_prod;
1223 u16 eq_cons;
1224 __le16 *eq_cons_sb;
1225 atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
1226
1227
1228
1229 /* Counter for marking that there is a STAT_QUERY ramrod pending */
1230 u16 stats_pending;
1231 /* Counter for completed statistics ramrods */
1232 u16 stats_comp;
1233
1234 /* End of fields used in the performance code paths */
1235
1236 int panic;
1237 int msg_enable;
1238
1239 u32 flags;
1240#define PCIX_FLAG (1 << 0)
1241#define PCI_32BIT_FLAG (1 << 1)
1242#define ONE_PORT_FLAG (1 << 2)
1243#define NO_WOL_FLAG (1 << 3)
1244#define USING_DAC_FLAG (1 << 4)
1245#define USING_MSIX_FLAG (1 << 5)
1246#define USING_MSI_FLAG (1 << 6)
1247#define DISABLE_MSI_FLAG (1 << 7)
1248#define TPA_ENABLE_FLAG (1 << 8)
1249#define NO_MCP_FLAG (1 << 9)
1250
1251#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
1252#define MF_FUNC_DIS (1 << 11)
1253#define OWN_CNIC_IRQ (1 << 12)
1254#define NO_ISCSI_OOO_FLAG (1 << 13)
1255#define NO_ISCSI_FLAG (1 << 14)
1256#define NO_FCOE_FLAG (1 << 15)
1257
1258#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
1259#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
1260#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
1261
1262 int pm_cap;
1263 int mrrs;
1264
1265 struct delayed_work sp_task;
1266 struct delayed_work sp_rtnl_task;
1267
1268 struct delayed_work period_task;
1269 struct timer_list timer;
1270 int current_interval;
1271
1272 u16 fw_seq;
1273 u16 fw_drv_pulse_wr_seq;
1274 u32 func_stx;
1275
1276 struct link_params link_params;
1277 struct link_vars link_vars;
1278 u32 link_cnt;
1279 struct bnx2x_link_report_data last_reported_link;
1280
1281 struct mdio_if_info mdio;
1282
1283 struct bnx2x_common common;
1284 struct bnx2x_port port;
1285
1286 struct cmng_struct_per_port cmng;
1287 u32 vn_weight_sum;
1288 u32 mf_config[E1HVN_MAX];
1289 u32 mf2_config[E2_FUNC_MAX];
1290 u32 path_has_ovlan; /* E3 */
1291 u16 mf_ov;
1292 u8 mf_mode;
1293#define IS_MF(bp) (bp->mf_mode != 0)
1294#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
1295#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
1296
1297 u8 wol;
1298
1299 int rx_ring_size;
1300
1301 u16 tx_quick_cons_trip_int;
1302 u16 tx_quick_cons_trip;
1303 u16 tx_ticks_int;
1304 u16 tx_ticks;
1305
1306 u16 rx_quick_cons_trip_int;
1307 u16 rx_quick_cons_trip;
1308 u16 rx_ticks_int;
1309 u16 rx_ticks;
1310/* Maximal coalescing timeout in us */
1311#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
1312
1313 u32 lin_cnt;
1314
1315 u16 state;
1316#define BNX2X_STATE_CLOSED 0
1317#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000
1318#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000
1319#define BNX2X_STATE_OPEN 0x3000
1320#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
1321#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
1322
1323#define BNX2X_STATE_DIAG 0xe000
1324#define BNX2X_STATE_ERROR 0xf000
1325
1326 int multi_mode;
1327#define BNX2X_MAX_PRIORITY 8
1328#define BNX2X_MAX_ENTRIES_PER_PRI 16
1329#define BNX2X_MAX_COS 3
1330#define BNX2X_MAX_TX_COS 2
1331 int num_queues;
1332 int disable_tpa;
1333
1334 u32 rx_mode;
1335#define BNX2X_RX_MODE_NONE 0
1336#define BNX2X_RX_MODE_NORMAL 1
1337#define BNX2X_RX_MODE_ALLMULTI 2
1338#define BNX2X_RX_MODE_PROMISC 3
1339#define BNX2X_MAX_MULTICAST 64
1340
1341 u8 igu_dsb_id;
1342 u8 igu_base_sb;
1343 u8 igu_sb_cnt;
1344 dma_addr_t def_status_blk_mapping;
1345
1346 struct bnx2x_slowpath *slowpath;
1347 dma_addr_t slowpath_mapping;
1348
1349 /* Total number of FW statistics requests */
1350 u8 fw_stats_num;
1351
1352 /*
1353 * This is a memory buffer that will contain both the statistics
1354 * ramrod request and the data.
1355 */
1356 void *fw_stats;
1357 dma_addr_t fw_stats_mapping;
1358
1359 /*
1360 * FW statistics request shortcut (points at the
1361 * beginning of fw_stats buffer).
1362 */
1363 struct bnx2x_fw_stats_req *fw_stats_req;
1364 dma_addr_t fw_stats_req_mapping;
1365 int fw_stats_req_sz;
1366
1367 /*
1368 * FW statistics data shortcut (points at the beginning of
1369 * fw_stats buffer + fw_stats_req_sz).
1370 */
1371 struct bnx2x_fw_stats_data *fw_stats_data;
1372 dma_addr_t fw_stats_data_mapping;
1373 int fw_stats_data_sz;
1374
1375 struct hw_context context;
1376
1377 struct bnx2x_ilt *ilt;
1378#define BP_ILT(bp) ((bp)->ilt)
1379#define ILT_MAX_LINES 256
1380/*
1381 * Maximum supported number of RSS queues: the number of IGU SBs minus the
1382 * one that goes to CNIC.
1383 */
1384#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT)
1385
1386/*
1387 * Maximum CID count that might be required by the bnx2x:
1388 * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
1389 */
1390#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
1391 NON_ETH_CONTEXT_USE + CNIC_PRESENT)
1392#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
1393 ILT_PAGE_CIDS))
1394#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
1395
1396 int qm_cid_count;
1397
1398 int dropless_fc;
1399
1400#ifdef BCM_CNIC
1401 u32 cnic_flags;
1402#define BNX2X_CNIC_FLAG_MAC_SET 1
1403 void *t2;
1404 dma_addr_t t2_mapping;
1405 struct cnic_ops __rcu *cnic_ops;
1406 void *cnic_data;
1407 u32 cnic_tag;
1408 struct cnic_eth_dev cnic_eth_dev;
1409 union host_hc_status_block cnic_sb;
1410 dma_addr_t cnic_sb_mapping;
1411 struct eth_spe *cnic_kwq;
1412 struct eth_spe *cnic_kwq_prod;
1413 struct eth_spe *cnic_kwq_cons;
1414 struct eth_spe *cnic_kwq_last;
1415 u16 cnic_kwq_pending;
1416 u16 cnic_spq_pending;
1417 u8 fip_mac[ETH_ALEN];
1418 struct mutex cnic_mutex;
1419 struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
1420
1421 /* Start index of the "special" (CNIC related) L2 clients */
1422 u8 cnic_base_cl_id;
1423#endif
1424
1425 int dmae_ready;
1426 /* used to synchronize dmae accesses */
1427 spinlock_t dmae_lock;
1428
1429 /* used to protect the FW mail box */
1430 struct mutex fw_mb_mutex;
1431
1432 /* used to synchronize stats collecting */
1433 int stats_state;
1434
1435 /* used to synchronize statistics handling between concurrent threads */
1436 spinlock_t stats_lock;
1437
1438 /* used by dmae command loader */
1439 struct dmae_command stats_dmae;
1440 int executer_idx;
1441
1442 u16 stats_counter;
1443 struct bnx2x_eth_stats eth_stats;
1444
1445 struct z_stream_s *strm;
1446 void *gunzip_buf;
1447 dma_addr_t gunzip_mapping;
1448 int gunzip_outlen;
1449#define FW_BUF_SIZE 0x8000
1450#define GUNZIP_BUF(bp) (bp->gunzip_buf)
1451#define GUNZIP_PHYS(bp) (bp->gunzip_mapping)
1452#define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen)
1453
1454 struct raw_op *init_ops;
1455 /* Init blocks offsets inside init_ops */
1456 u16 *init_ops_offsets;
1457 /* Data blob - has 32 bit granularity */
1458 u32 *init_data;
1459 u32 init_mode_flags;
1460#define INIT_MODE_FLAGS(bp) (bp->init_mode_flags)
1461 /* Zipped PRAM blobs - raw data */
1462 const u8 *tsem_int_table_data;
1463 const u8 *tsem_pram_data;
1464 const u8 *usem_int_table_data;
1465 const u8 *usem_pram_data;
1466 const u8 *xsem_int_table_data;
1467 const u8 *xsem_pram_data;
1468 const u8 *csem_int_table_data;
1469 const u8 *csem_pram_data;
1470#define INIT_OPS(bp) (bp->init_ops)
1471#define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets)
1472#define INIT_DATA(bp) (bp->init_data)
1473#define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data)
1474#define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data)
1475#define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data)
1476#define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data)
1477#define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data)
1478#define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data)
1479#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
1480#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
1481
1482#define PHY_FW_VER_LEN 20
1483 char fw_ver[32];
1484 const struct firmware *firmware;
1485
1486 /* DCB support on/off */
1487 u16 dcb_state;
1488#define BNX2X_DCB_STATE_OFF 0
1489#define BNX2X_DCB_STATE_ON 1
1490
1491 /* DCBX engine mode */
1492 int dcbx_enabled;
1493#define BNX2X_DCBX_ENABLED_OFF 0
1494#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1
1495#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2
1496#define BNX2X_DCBX_ENABLED_INVALID (-1)
1497
1498 bool dcbx_mode_uset;
1499
1500 struct bnx2x_config_dcbx_params dcbx_config_params;
1501 struct bnx2x_dcbx_port_params dcbx_port_params;
1502 int dcb_version;
1503
1504 /* CAM credit pools */
1505 struct bnx2x_credit_pool_obj macs_pool;
1506
1507 /* RX_MODE object */
1508 struct bnx2x_rx_mode_obj rx_mode_obj;
1509
1510 /* MCAST object */
1511 struct bnx2x_mcast_obj mcast_obj;
1512
1513 /* RSS configuration object */
1514 struct bnx2x_rss_config_obj rss_conf_obj;
1515
1516 /* Function State controlling object */
1517 struct bnx2x_func_sp_obj func_obj;
1518
1519 unsigned long sp_state;
1520
1521 /* operation indication for the sp_rtnl task */
1522 unsigned long sp_rtnl_state;
1523
1524 /* DCBX Negotiation results */
1525 struct dcbx_features dcbx_local_feat;
1526 u32 dcbx_error;
1527
1528#ifdef BCM_DCBNL
1529 struct dcbx_features dcbx_remote_feat;
1530 u32 dcbx_remote_flags;
1531#endif
1532 u32 pending_max;
1533
1534 /* multiple tx classes of service */
1535 u8 max_cos;
1536
1537 /* priority to cos mapping */
1538 u8 prio_to_cos[8];
1539};
1540
1541/* The number of Tx queues may be less than or equal to the number of Rx queues */
1542extern int num_queues;
1543#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1544#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
1545#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
1546
1547#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
1548
1549#define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp)
1550/* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */
1551
1552#define RSS_IPV4_CAP_MASK \
1553 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
1554
1555#define RSS_IPV4_TCP_CAP_MASK \
1556 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
1557
1558#define RSS_IPV6_CAP_MASK \
1559 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
1560
1561#define RSS_IPV6_TCP_CAP_MASK \
1562 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
1563
1564/* func init flags */
1565#define FUNC_FLG_RSS 0x0001
1566#define FUNC_FLG_STATS 0x0002
1567/* removed FUNC_FLG_UNMATCHED 0x0004 */
1568#define FUNC_FLG_TPA 0x0008
1569#define FUNC_FLG_SPQ 0x0010
1570#define FUNC_FLG_LEADING 0x0020 /* PF only */
1571
1572
1573struct bnx2x_func_init_params {
1574 /* dma */
1575 dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
1576 dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */
1577
1578 u16 func_flgs;
1579 u16 func_id; /* abs fid */
1580 u16 pf_id;
1581 u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
1582};
1583
1584#define for_each_eth_queue(bp, var) \
1585 for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
1586
1587#define for_each_nondefault_eth_queue(bp, var) \
1588 for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
1589
1590#define for_each_queue(bp, var) \
1591 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
1592 if (skip_queue(bp, var)) \
1593 continue; \
1594 else
1595
1596/* Skip forwarding FP */
1597#define for_each_rx_queue(bp, var) \
1598 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
1599 if (skip_rx_queue(bp, var)) \
1600 continue; \
1601 else
1602
1603/* Skip OOO FP */
1604#define for_each_tx_queue(bp, var) \
1605 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
1606 if (skip_tx_queue(bp, var)) \
1607 continue; \
1608 else
1609
1610#define for_each_nondefault_queue(bp, var) \
1611 for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
1612 if (skip_queue(bp, var)) \
1613 continue; \
1614 else
1615
1616#define for_each_cos_in_tx_queue(fp, var) \
1617 for ((var) = 0; (var) < (fp)->max_cos; (var)++)
1618
1619/* skip rx queue
1620 * if FCOE l2 support is disabled and this is the fcoe L2 queue
1621 */
1622#define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
1623
1624/* skip tx queue
1625 * if FCOE l2 support is disabled and this is the fcoe L2 queue
1626 */
1627#define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
1628
1629#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
1630
1631
1632
1633
1634/**
1635 * bnx2x_set_mac_one - configure a single MAC address
1636 *
1637 * @bp: driver handle
1638 * @mac: MAC to configure
1639 * @obj: MAC object handle
1640 * @set: if 'true' add a new MAC, otherwise - delete
1641 * @mac_type: the type of the MAC to configure (e.g. ETH, UC list)
1642 * @ramrod_flags: RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
1643 *
1644 * Configures one MAC according to provided parameters or continues the
1645 * execution of previously scheduled commands if RAMROD_CONT is set in
1646 * ramrod_flags.
1647 *
1648 * Returns zero if the operation has completed successfully, a positive value
1649 * if it has been successfully scheduled, and a negative value if the requested
1650 * operation has failed.
1651 */
1652int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
1653 struct bnx2x_vlan_mac_obj *obj, bool set,
1654 int mac_type, unsigned long *ramrod_flags);
1664/**
1665 * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
1666 *
1667 * @bp: driver handle
1668 * @mac_obj: MAC object handle
1669 * @mac_type: type of the MACs to clear (BNX2X_XXX_MAC)
1670 * @wait_for_comp: if 'true' block until completion
1671 *
1672 * Deletes all MACs of the specific type (e.g. ETH, UC list).
1673 *
1674 * Returns zero if the operation has completed successfully, a positive value
1675 * if it has been successfully scheduled, and a negative value if the requested
1676 * operation has failed.
1677 */
1678int bnx2x_del_all_macs(struct bnx2x *bp,
1679 struct bnx2x_vlan_mac_obj *mac_obj,
1680 int mac_type, bool wait_for_comp);
1681
1682/* Init Function API */
1683void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
1684int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
1685int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
1686int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
1687int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
1688void bnx2x_read_mf_cfg(struct bnx2x *bp);
1689
1690
1691/* dmae */
1692void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
1693void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
1694 u32 len32);
1695void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
1696u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
1697u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
1698u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
1699 bool with_comp, u8 comp_type);
1700
1701
1702void bnx2x_calc_fc_adv(struct bnx2x *bp);
1703int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1704 u32 data_hi, u32 data_lo, int cmd_type);
1705void bnx2x_update_coalesce(struct bnx2x *bp);
1706int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
1707
1708static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1709 int wait)
1710{
1711 u32 val;
1712
1713 do {
1714 val = REG_RD(bp, reg);
1715 if (val == expected)
1716 break;
1717 ms -= wait;
1718 msleep(wait);
1719
1720 } while (ms > 0);
1721
1722 return val;
1723}
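
/* Usage sketch (illustrative): poll a register for up to 200ms in 10ms steps;
 * "some_reg" stands in for a real register offset.
 *
 *	u32 val = reg_poll(bp, some_reg, 1, 200, 10);
 *	if (val != 1)
 *		BNX2X_ERR("poll timed out\n");
 */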
1724
1725#define BNX2X_ILT_ZALLOC(x, y, size) \
1726 do { \
1727 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
1728 if (x) \
1729 memset(x, 0, size); \
1730 } while (0)
1731
1732#define BNX2X_ILT_FREE(x, y, size) \
1733 do { \
1734 if (x) { \
1735 dma_free_coherent(&bp->pdev->dev, size, x, y); \
1736 x = NULL; \
1737 y = 0; \
1738 } \
1739 } while (0)
1740
1741#define ILOG2(x) (ilog2((x)))
1742
1743#define ILT_NUM_PAGE_ENTRIES (3072)
1744/* In 57710/11 we use the whole table since we have 8 functions.
1745 * In 57712 we have only 4 functions, but use the same size per function, so
1746 * only half of the table is in use.
1747 */
1748#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8)
1749
1750#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
1751/*
1752 * The physical address is shifted right by 12 bits and has a valid bit (1)
1753 * added at the 53rd bit;
1754 * then, since this is a wide register(TM),
1755 * we split it into two 32-bit writes.
1756 */
1757#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
1758#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
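/*
 * Illustrative sketch (not a specific call site from this driver):
 * 'mapping' is a dma_addr_t and 'reg' is the low half of a wide on-chip
 * register pair. ONCHIP_ADDR1() carries address bits 12..43 and
 * ONCHIP_ADDR2() the valid bit plus address bits 44..63:
 *
 *	REG_WR(bp, reg, ONCHIP_ADDR1(mapping));
 *	REG_WR(bp, reg + 4, ONCHIP_ADDR2(mapping));
 */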
1759
1760/* load/unload mode */
1761#define LOAD_NORMAL 0
1762#define LOAD_OPEN 1
1763#define LOAD_DIAG 2
1764#define UNLOAD_NORMAL 0
1765#define UNLOAD_CLOSE 1
1766#define UNLOAD_RECOVERY 2
1767
1768
1769/* DMAE command defines */
1770#define DMAE_TIMEOUT -1
1771#define DMAE_PCI_ERROR -2 /* E2 and onward */
1772#define DMAE_NOT_RDY -3
1773#define DMAE_PCI_ERR_FLAG 0x80000000
1774
1775#define DMAE_SRC_PCI 0
1776#define DMAE_SRC_GRC 1
1777
1778#define DMAE_DST_NONE 0
1779#define DMAE_DST_PCI 1
1780#define DMAE_DST_GRC 2
1781
1782#define DMAE_COMP_PCI 0
1783#define DMAE_COMP_GRC 1
1784
1785/* E2 and onward - PCI error handling in the completion */
1786
1787#define DMAE_COMP_REGULAR 0
1788#define DMAE_COM_SET_ERR 1
1789
1790#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \
1791 DMAE_COMMAND_SRC_SHIFT)
1792#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \
1793 DMAE_COMMAND_SRC_SHIFT)
1794
1795#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \
1796 DMAE_COMMAND_DST_SHIFT)
1797#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \
1798 DMAE_COMMAND_DST_SHIFT)
1799
1800#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \
1801 DMAE_COMMAND_C_DST_SHIFT)
1802#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << \
1803 DMAE_COMMAND_C_DST_SHIFT)
1804
1805#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
1806
1807#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
1808#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
1809#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
1810#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
1811
1812#define DMAE_CMD_PORT_0 0
1813#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT
1814
1815#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET
1816#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
1817#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
1818
1819#define DMAE_SRC_PF 0
1820#define DMAE_SRC_VF 1
1821
1822#define DMAE_DST_PF 0
1823#define DMAE_DST_VF 1
1824
1825#define DMAE_C_SRC 0
1826#define DMAE_C_DST 1
1827
1828#define DMAE_LEN32_RD_MAX 0x80
1829#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
1830
1831#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and onward - the upper bit
1832 indicates an error */
1833
1834#define MAX_DMAE_C_PER_PORT 8
1835#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1836 BP_VN(bp))
1837#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1838 E1HVN_MAX)
1839
1840/* PCIE link and speed */
1841#define PCICFG_LINK_WIDTH 0x1f00000
1842#define PCICFG_LINK_WIDTH_SHIFT 20
1843#define PCICFG_LINK_SPEED 0xf0000
1844#define PCICFG_LINK_SPEED_SHIFT 16
1845
1846
1847#define BNX2X_NUM_TESTS 7
1848
1849#define BNX2X_PHY_LOOPBACK 0
1850#define BNX2X_MAC_LOOPBACK 1
1851#define BNX2X_PHY_LOOPBACK_FAILED 1
1852#define BNX2X_MAC_LOOPBACK_FAILED 2
1853#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
1854 BNX2X_PHY_LOOPBACK_FAILED)
1855
1856
1857#define STROM_ASSERT_ARRAY_SIZE 50
1858
1859
1860/* must be used on a CID before placing it on a HW ring */
1861#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
1862 (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
1863 (x))
1864
1865#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
1866#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
1867
1868
1869#define BNX2X_BTR 4
1870#define MAX_SPQ_PENDING 8
1871
1872/* CMNG constants, as derived from system spec calculations */
1873/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
1874#define DEF_MIN_RATE 100
1875/* resolution of the rate shaping timer - 400 usec */
1876#define RS_PERIODIC_TIMEOUT_USEC 400
1877/* number of bytes in single QM arbitration cycle -
1878 * coefficient for calculating the fairness timer */
1879#define QM_ARB_BYTES 160000
1880/* resolution of Min algorithm 1:100 */
1881#define MIN_RES 100
1882/* how many bytes above threshold for the minimal credit of Min algorithm */
1883#define MIN_ABOVE_THRESH 32768
1884/* Fairness algorithm integration time coefficient -
1885 * for calculating the actual Tfair */
1886#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
1887/* Memory of the fairness algorithm: 2 cycles */
1888#define FAIR_MEM 2
1889
1890
1891#define ATTN_NIG_FOR_FUNC (1L << 8)
1892#define ATTN_SW_TIMER_4_FUNC (1L << 9)
1893#define GPIO_2_FUNC (1L << 10)
1894#define GPIO_3_FUNC (1L << 11)
1895#define GPIO_4_FUNC (1L << 12)
1896#define ATTN_GENERAL_ATTN_1 (1L << 13)
1897#define ATTN_GENERAL_ATTN_2 (1L << 14)
1898#define ATTN_GENERAL_ATTN_3 (1L << 15)
1899#define ATTN_GENERAL_ATTN_4 (1L << 13)
1900#define ATTN_GENERAL_ATTN_5 (1L << 14)
1901#define ATTN_GENERAL_ATTN_6 (1L << 15)
1902
1903#define ATTN_HARD_WIRED_MASK 0xff00
1904#define ATTENTION_ID 4
1905
1906
1907/* stuff added to make the code fit 80Col */
1908
1909#define BNX2X_PMF_LINK_ASSERT \
1910 GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))
1911
1912#define BNX2X_MC_ASSERT_BITS \
1913 (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
1914 GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
1915 GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
1916 GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
1917
1918#define BNX2X_MCP_ASSERT \
1919 GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
1920
1921#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
1922#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
1923 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
1924 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
1925 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
1926 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
1927 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
1928
1929#define HW_INTERRUT_ASSERT_SET_0 \
1930 (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
1931 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
1932 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
1933 AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
1934#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
1935 AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
1936 AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
1937 AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
1938 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
1939 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
1940 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
1941#define HW_INTERRUT_ASSERT_SET_1 \
1942 (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
1943 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
1944 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
1945 AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \
1946 AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \
1947 AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \
1948 AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \
1949 AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \
1950 AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
1951 AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
1952 AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
1953#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\
1954 AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
1955 AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\
1956 AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
1957 AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\
1958 AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
1959 AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
1960 AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\
1961 AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
1962 AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
1963 AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
1964 AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\
1965 AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
1966 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
1967 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
1968 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
1969#define HW_INTERRUT_ASSERT_SET_2 \
1970 (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
1971 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
1972 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
1973 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
1974 AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
1975#define HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
1976 AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
1977 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
1978 AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
1979 AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
1980 AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\
1981 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
1982 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
1983
1984#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
1985 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
1986 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
1987 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
1988
1989#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
1990 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
1991
1992#define RSS_FLAGS(bp) \
1993 (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
1994 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
1995 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
1996 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
1997 (bp->multi_mode << \
1998 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
1999#define MULTI_MASK 0x7f
2000
2001
2002#define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func)
2003#define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func)
2004#define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func)
2005#define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func)
2006
2007#define DEF_USB_IGU_INDEX_OFF \
2008 offsetof(struct cstorm_def_status_block_u, igu_index)
2009#define DEF_CSB_IGU_INDEX_OFF \
2010 offsetof(struct cstorm_def_status_block_c, igu_index)
2011#define DEF_XSB_IGU_INDEX_OFF \
2012 offsetof(struct xstorm_def_status_block, igu_index)
2013#define DEF_TSB_IGU_INDEX_OFF \
2014 offsetof(struct tstorm_def_status_block, igu_index)
2015
2016#define DEF_USB_SEGMENT_OFF \
2017 offsetof(struct cstorm_def_status_block_u, segment)
2018#define DEF_CSB_SEGMENT_OFF \
2019 offsetof(struct cstorm_def_status_block_c, segment)
2020#define DEF_XSB_SEGMENT_OFF \
2021 offsetof(struct xstorm_def_status_block, segment)
2022#define DEF_TSB_SEGMENT_OFF \
2023 offsetof(struct tstorm_def_status_block, segment)
2024
2025#define BNX2X_SP_DSB_INDEX \
2026 (&bp->def_status_blk->sp_sb.\
2027 index_values[HC_SP_INDEX_ETH_DEF_CONS])
2028
2029#define SET_FLAG(value, mask, flag) \
2030 do {\
2031 (value) &= ~(mask);\
2032 (value) |= ((flag) << (mask##_SHIFT));\
2033 } while (0)
2034
2035#define GET_FLAG(value, mask) \
2036 (((value) & (mask)) >> (mask##_SHIFT))
2037
2038#define GET_FIELD(value, fname) \
2039 (((value) & (fname##_MASK)) >> (fname##_SHIFT))
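/*
 * Usage sketch for the three helpers above; 'flags' and the
 * FOO_FLAGS_BAR mask/shift pair are hypothetical names, assuming
 * FOO_FLAGS_BAR_SHIFT is defined alongside FOO_FLAGS_BAR:
 *
 *	SET_FLAG(flags, FOO_FLAGS_BAR, 1);
 *	if (GET_FLAG(flags, FOO_FLAGS_BAR))
 *		do_something();
 */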
2040
2041#define CAM_IS_INVALID(x) \
2042 (GET_FLAG(x.flags, \
2043 MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
2044 (T_ETH_MAC_COMMAND_INVALIDATE))
2045
2046/* Number of u32 elements in MC hash array */
2047#define MC_HASH_SIZE 8
2048#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
2049 TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
2050
2051
2052#ifndef PXP2_REG_PXP2_INT_STS
2053#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
2054#endif
2055
2056#ifndef ETH_MAX_RX_CLIENTS_E2
2057#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
2058#endif
2059
2060#define BNX2X_VPD_LEN 128
2061#define VENDOR_ID_LEN 4
2062
2063/* Congestion management fairness mode */
2064#define CMNG_FNS_NONE 0
2065#define CMNG_FNS_MINMAX 1
2066
2067#define HC_SEG_ACCESS_DEF 0 /* Driver decision 0-3 */
2068#define HC_SEG_ACCESS_ATTN 4
2069#define HC_SEG_ACCESS_NORM 0 /* Driver decision 0-1 */
2070
2071static const u32 dmae_reg_go_c[] = {
2072 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
2073 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
2074 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
2075 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
2076};
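/*
 * A DMAE command loaded into mailbox 'idx' is kicked by writing 1 to the
 * matching GO register - roughly what bnx2x_post_dmae() ends up doing
 * after copying the command into GRC (a sketch):
 *
 *	REG_WR(bp, dmae_reg_go_c[idx], 1);
 */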
2077
2078void bnx2x_set_ethtool_ops(struct net_device *netdev);
2079void bnx2x_notify_link_changed(struct bnx2x *bp);
2080#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
new file mode 100644
index 00000000000..c4cbf973641
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -0,0 +1,3593 @@
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#include <linux/etherdevice.h>
19#include <linux/if_vlan.h>
20#include <linux/interrupt.h>
21#include <linux/ip.h>
22#include <net/ipv6.h>
23#include <net/ip6_checksum.h>
24#include <linux/firmware.h>
25#include <linux/prefetch.h>
26#include "bnx2x_cmn.h"
27#include "bnx2x_init.h"
28#include "bnx2x_sp.h"
29
30
31
32/**
33 * bnx2x_bz_fp - zero content of the fastpath structure.
34 *
35 * @bp: driver handle
36 * @index: fastpath index to be zeroed
37 *
 38 * Makes sure the contents of bp->fp[index].napi are kept
 39 * intact.
40 */
41static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
42{
43 struct bnx2x_fastpath *fp = &bp->fp[index];
44 struct napi_struct orig_napi = fp->napi;
45 /* bzero bnx2x_fastpath contents */
46 memset(fp, 0, sizeof(*fp));
47
48 /* Restore the NAPI object as it has been already initialized */
49 fp->napi = orig_napi;
50
51 fp->bp = bp;
52 fp->index = index;
53 if (IS_ETH_FP(fp))
54 fp->max_cos = bp->max_cos;
55 else
56 /* Special queues support only one CoS */
57 fp->max_cos = 1;
58
59 /*
 60 * set the tpa flag for each queue. The tpa flag determines the queue's
 61 * minimal size so it must be set prior to queue memory allocation
62 */
63 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
64
65#ifdef BCM_CNIC
66 /* We don't want TPA on an FCoE L2 ring */
67 if (IS_FCOE_FP(fp))
68 fp->disable_tpa = 1;
69#endif
70}
71
72/**
73 * bnx2x_move_fp - move content of the fastpath structure.
74 *
75 * @bp: driver handle
76 * @from: source FP index
77 * @to: destination FP index
78 *
 79 * Makes sure the contents of bp->fp[to].napi are kept
 80 * intact.
81 */
82static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
83{
84 struct bnx2x_fastpath *from_fp = &bp->fp[from];
85 struct bnx2x_fastpath *to_fp = &bp->fp[to];
86 struct napi_struct orig_napi = to_fp->napi;
87 /* Move bnx2x_fastpath contents */
88 memcpy(to_fp, from_fp, sizeof(*to_fp));
89 to_fp->index = to;
90
91 /* Restore the NAPI object as it has been already initialized */
92 to_fp->napi = orig_napi;
93}
94
95int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
96
97/* free skb in the packet ring at pos idx
98 * return idx of last bd freed
99 */
100static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
101 u16 idx)
102{
103 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
104 struct eth_tx_start_bd *tx_start_bd;
105 struct eth_tx_bd *tx_data_bd;
106 struct sk_buff *skb = tx_buf->skb;
107 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
108 int nbd;
109
 110 /* prefetch skb end pointer to speed up dev_kfree_skb() */
111 prefetch(&skb->end);
112
113 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
114 txdata->txq_index, idx, tx_buf, skb);
115
116 /* unmap first bd */
117 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
118 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
119 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
120 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
121
122
123 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
124#ifdef BNX2X_STOP_ON_ERROR
125 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
126 BNX2X_ERR("BAD nbd!\n");
127 bnx2x_panic();
128 }
129#endif
130 new_cons = nbd + tx_buf->first_bd;
131
132 /* Get the next bd */
133 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
134
135 /* Skip a parse bd... */
136 --nbd;
137 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
138
139 /* ...and the TSO split header bd since they have no mapping */
140 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
141 --nbd;
142 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
143 }
144
145 /* now free frags */
146 while (nbd > 0) {
147
148 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
149 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
150 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
151 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
152 if (--nbd)
153 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
154 }
155
156 /* release skb */
157 WARN_ON(!skb);
158 dev_kfree_skb_any(skb);
159 tx_buf->first_bd = 0;
160 tx_buf->skb = NULL;
161
162 return new_cons;
163}
164
165int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
166{
167 struct netdev_queue *txq;
168 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
169
170#ifdef BNX2X_STOP_ON_ERROR
171 if (unlikely(bp->panic))
172 return -1;
173#endif
174
175 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
176 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
177 sw_cons = txdata->tx_pkt_cons;
178
179 while (sw_cons != hw_cons) {
180 u16 pkt_cons;
181
182 pkt_cons = TX_BD(sw_cons);
183
 184 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
 185 "pkt_cons %u\n",
186 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
187
188 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
189 sw_cons++;
190 }
191
192 txdata->tx_pkt_cons = sw_cons;
193 txdata->tx_bd_cons = bd_cons;
194
195 /* Need to make the tx_bd_cons update visible to start_xmit()
196 * before checking for netif_tx_queue_stopped(). Without the
197 * memory barrier, there is a small possibility that
198 * start_xmit() will miss it and cause the queue to be stopped
199 * forever.
200 * On the other hand we need an rmb() here to ensure the proper
201 * ordering of bit testing in the following
202 * netif_tx_queue_stopped(txq) call.
203 */
204 smp_mb();
205
206 if (unlikely(netif_tx_queue_stopped(txq))) {
 207 /* Taking tx_lock() is needed to prevent re-enabling the queue
 208 * while it's empty. This could happen if rx_action() gets
209 * suspended in bnx2x_tx_int() after the condition before
210 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
211 *
212 * stops the queue->sees fresh tx_bd_cons->releases the queue->
213 * sends some packets consuming the whole queue again->
214 * stops the queue
215 */
216
217 __netif_tx_lock(txq, smp_processor_id());
218
219 if ((netif_tx_queue_stopped(txq)) &&
220 (bp->state == BNX2X_STATE_OPEN) &&
221 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
222 netif_tx_wake_queue(txq);
223
224 __netif_tx_unlock(txq);
225 }
226 return 0;
227}
228
229static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
230 u16 idx)
231{
232 u16 last_max = fp->last_max_sge;
233
234 if (SUB_S16(idx, last_max) > 0)
235 fp->last_max_sge = idx;
236}
237
238static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
239 struct eth_fast_path_rx_cqe *fp_cqe)
240{
241 struct bnx2x *bp = fp->bp;
242 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
243 le16_to_cpu(fp_cqe->len_on_bd)) >>
244 SGE_PAGE_SHIFT;
245 u16 last_max, last_elem, first_elem;
246 u16 delta = 0;
247 u16 i;
248
249 if (!sge_len)
250 return;
251
252 /* First mark all used pages */
253 for (i = 0; i < sge_len; i++)
254 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
255 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
256
257 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
258 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
259
260 /* Here we assume that the last SGE index is the biggest */
261 prefetch((void *)(fp->sge_mask));
262 bnx2x_update_last_max_sge(fp,
263 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
264
265 last_max = RX_SGE(fp->last_max_sge);
266 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
267 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
268
269 /* If ring is not full */
270 if (last_elem + 1 != first_elem)
271 last_elem++;
272
273 /* Now update the prod */
274 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
275 if (likely(fp->sge_mask[i]))
276 break;
277
278 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
279 delta += BIT_VEC64_ELEM_SZ;
280 }
281
282 if (delta > 0) {
283 fp->rx_sge_prod += delta;
284 /* clear page-end entries */
285 bnx2x_clear_sge_mask_next_elems(fp);
286 }
287
288 DP(NETIF_MSG_RX_STATUS,
289 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
290 fp->last_max_sge, fp->rx_sge_prod);
291}
292
293static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
294 struct sk_buff *skb, u16 cons, u16 prod,
295 struct eth_fast_path_rx_cqe *cqe)
296{
297 struct bnx2x *bp = fp->bp;
298 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
299 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
300 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
301 dma_addr_t mapping;
302 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
303 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
304
305 /* print error if current state != stop */
306 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
307 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
308
309 /* Try to map an empty skb from the aggregation info */
310 mapping = dma_map_single(&bp->pdev->dev,
311 first_buf->skb->data,
312 fp->rx_buf_size, DMA_FROM_DEVICE);
313 /*
314 * ...if it fails - move the skb from the consumer to the producer
315 * and set the current aggregation state as ERROR to drop it
316 * when TPA_STOP arrives.
317 */
318
319 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
320 /* Move the BD from the consumer to the producer */
321 bnx2x_reuse_rx_skb(fp, cons, prod);
322 tpa_info->tpa_state = BNX2X_TPA_ERROR;
323 return;
324 }
325
326 /* move empty skb from pool to prod */
327 prod_rx_buf->skb = first_buf->skb;
328 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
329 /* point prod_bd to new skb */
330 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
331 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
332
333 /* move partial skb from cons to pool (don't unmap yet) */
334 *first_buf = *cons_rx_buf;
335
336 /* mark bin state as START */
337 tpa_info->parsing_flags =
338 le16_to_cpu(cqe->pars_flags.flags);
339 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
340 tpa_info->tpa_state = BNX2X_TPA_START;
341 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
342 tpa_info->placement_offset = cqe->placement_offset;
343
344#ifdef BNX2X_STOP_ON_ERROR
345 fp->tpa_queue_used |= (1 << queue);
346#ifdef _ASM_GENERIC_INT_L64_H
347 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
348#else
349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
350#endif
351 fp->tpa_queue_used);
352#endif
353}
354
355/* Timestamp option length allowed for TPA aggregation:
356 *
357 * nop nop kind length echo val (1 + 1 + 1 + 1 + 4 + 4 = 12 bytes)
358 */
359#define TPA_TSTAMP_OPT_LEN 12
360/**
361 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
362 *
363 * @bp: driver handle
364 * @parsing_flags: parsing flags from the START CQE
365 * @len_on_bd: total length of the first packet for the
366 * aggregation.
367 *
368 * Approximate value of the MSS for this aggregation calculated using
369 * the first packet of it.
370 */
371static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
372 u16 len_on_bd)
373{
374 /*
 375 * TPA aggregation won't have either IP options or TCP options
376 * other than timestamp or IPv6 extension headers.
377 */
378 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
379
380 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
381 PRS_FLAG_OVERETH_IPV6)
382 hdrs_len += sizeof(struct ipv6hdr);
383 else /* IPv4 */
384 hdrs_len += sizeof(struct iphdr);
385
386
 387 /* Check if there was a TCP timestamp; if there was, it will
 388 * always be 12 bytes long: nop nop kind length echo val.
 389 *
 390 * Otherwise the FW would close the aggregation.
391 */
392 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
393 hdrs_len += TPA_TSTAMP_OPT_LEN;
394
395 return len_on_bd - hdrs_len;
396}
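/* Worked example for bnx2x_set_lro_mss() (illustrative): an IPv4
 * aggregation carrying TCP timestamps with len_on_bd = 1514 gives
 * hdrs_len = 14 (ETH_HLEN) + 20 (iphdr) + 20 (tcphdr) + 12 = 66,
 * so the estimated MSS is 1514 - 66 = 1448.
 */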
397
398static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
399 u16 queue, struct sk_buff *skb,
400 struct eth_end_agg_rx_cqe *cqe,
401 u16 cqe_idx)
402{
403 struct sw_rx_page *rx_pg, old_rx_pg;
404 u32 i, frag_len, frag_size, pages;
405 int err;
406 int j;
407 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
408 u16 len_on_bd = tpa_info->len_on_bd;
409
410 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
411 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
412
413 /* This is needed in order to enable forwarding support */
414 if (frag_size)
415 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
416 tpa_info->parsing_flags, len_on_bd);
417
418#ifdef BNX2X_STOP_ON_ERROR
419 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
420 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
421 pages, cqe_idx);
422 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
423 bnx2x_panic();
424 return -EINVAL;
425 }
426#endif
427
428 /* Run through the SGL and compose the fragmented skb */
429 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
430 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
431
432 /* FW gives the indices of the SGE as if the ring is an array
433 (meaning that "next" element will consume 2 indices) */
434 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
435 rx_pg = &fp->rx_page_ring[sge_idx];
436 old_rx_pg = *rx_pg;
437
438 /* If we fail to allocate a substitute page, we simply stop
439 where we are and drop the whole packet */
440 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
441 if (unlikely(err)) {
442 fp->eth_q_stats.rx_skb_alloc_failed++;
443 return err;
444 }
445
 446 /* Unmap the page as we are going to pass it to the stack */
447 dma_unmap_page(&bp->pdev->dev,
448 dma_unmap_addr(&old_rx_pg, mapping),
449 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
450
451 /* Add one frag and update the appropriate fields in the skb */
452 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
453
454 skb->data_len += frag_len;
455 skb->truesize += frag_len;
456 skb->len += frag_len;
457
458 frag_size -= frag_len;
459 }
460
461 return 0;
462}
463
464static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
465 u16 queue, struct eth_end_agg_rx_cqe *cqe,
466 u16 cqe_idx)
467{
468 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
469 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
470 u8 pad = tpa_info->placement_offset;
471 u16 len = tpa_info->len_on_bd;
472 struct sk_buff *skb = rx_buf->skb;
473 /* alloc new skb */
474 struct sk_buff *new_skb;
475 u8 old_tpa_state = tpa_info->tpa_state;
476
477 tpa_info->tpa_state = BNX2X_TPA_STOP;
478
 479 /* If there was an error during the handling of the TPA_START -
480 * drop this aggregation.
481 */
482 if (old_tpa_state == BNX2X_TPA_ERROR)
483 goto drop;
484
485 /* Try to allocate the new skb */
486 new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
487
488 /* Unmap skb in the pool anyway, as we are going to change
489 pool entry status to BNX2X_TPA_STOP even if new skb allocation
490 fails. */
491 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
492 fp->rx_buf_size, DMA_FROM_DEVICE);
493
494 if (likely(new_skb)) {
495 prefetch(skb);
496 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
497
498#ifdef BNX2X_STOP_ON_ERROR
499 if (pad + len > fp->rx_buf_size) {
500 BNX2X_ERR("skb_put is about to fail... "
501 "pad %d len %d rx_buf_size %d\n",
502 pad, len, fp->rx_buf_size);
503 bnx2x_panic();
504 return;
505 }
506#endif
507
508 skb_reserve(skb, pad);
509 skb_put(skb, len);
510
511 skb->protocol = eth_type_trans(skb, bp->dev);
512 skb->ip_summed = CHECKSUM_UNNECESSARY;
513
514 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
515 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
516 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
517 napi_gro_receive(&fp->napi, skb);
518 } else {
519 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
520 " - dropping packet!\n");
521 dev_kfree_skb_any(skb);
522 }
523
524
525 /* put new skb in bin */
526 rx_buf->skb = new_skb;
527
528 return;
529 }
530
531drop:
532 /* drop the packet and keep the buffer in the bin */
533 DP(NETIF_MSG_RX_STATUS,
534 "Failed to allocate or map a new skb - dropping packet!\n");
535 fp->eth_q_stats.rx_skb_alloc_failed++;
536}
537
538/* Set Toeplitz hash value in the skb using the value from the
539 * CQE (calculated by HW).
540 */
541static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
542 struct sk_buff *skb)
543{
544 /* Set Toeplitz hash from CQE */
545 if ((bp->dev->features & NETIF_F_RXHASH) &&
546 (cqe->fast_path_cqe.status_flags &
547 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
548 skb->rxhash =
549 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
550}
551
552int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
553{
554 struct bnx2x *bp = fp->bp;
555 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
556 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
557 int rx_pkt = 0;
558
559#ifdef BNX2X_STOP_ON_ERROR
560 if (unlikely(bp->panic))
561 return 0;
562#endif
563
564 /* CQ "next element" is of the size of the regular element,
565 that's why it's ok here */
566 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
567 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
568 hw_comp_cons++;
569
570 bd_cons = fp->rx_bd_cons;
571 bd_prod = fp->rx_bd_prod;
572 bd_prod_fw = bd_prod;
573 sw_comp_cons = fp->rx_comp_cons;
574 sw_comp_prod = fp->rx_comp_prod;
575
576 /* Memory barrier necessary as speculative reads of the rx
577 * buffer can be ahead of the index in the status block
578 */
579 rmb();
580
581 DP(NETIF_MSG_RX_STATUS,
582 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
583 fp->index, hw_comp_cons, sw_comp_cons);
584
585 while (sw_comp_cons != hw_comp_cons) {
586 struct sw_rx_bd *rx_buf = NULL;
587 struct sk_buff *skb;
588 union eth_rx_cqe *cqe;
589 struct eth_fast_path_rx_cqe *cqe_fp;
590 u8 cqe_fp_flags;
591 enum eth_rx_cqe_type cqe_fp_type;
592 u16 len, pad;
593
594#ifdef BNX2X_STOP_ON_ERROR
595 if (unlikely(bp->panic))
596 return 0;
597#endif
598
599 comp_ring_cons = RCQ_BD(sw_comp_cons);
600 bd_prod = RX_BD(bd_prod);
601 bd_cons = RX_BD(bd_cons);
602
603 /* Prefetch the page containing the BD descriptor
 604 at the producer's index. It will be needed when a new skb is
605 allocated */
606 prefetch((void *)(PAGE_ALIGN((unsigned long)
607 (&fp->rx_desc_ring[bd_prod])) -
608 PAGE_SIZE + 1));
609
610 cqe = &fp->rx_comp_ring[comp_ring_cons];
611 cqe_fp = &cqe->fast_path_cqe;
612 cqe_fp_flags = cqe_fp->type_error_flags;
613 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
614
615 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
616 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
617 cqe_fp_flags, cqe_fp->status_flags,
618 le32_to_cpu(cqe_fp->rss_hash_result),
619 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
620
621 /* is this a slowpath msg? */
622 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
623 bnx2x_sp_event(fp, cqe);
624 goto next_cqe;
625
626 /* this is an rx packet */
627 } else {
628 rx_buf = &fp->rx_buf_ring[bd_cons];
629 skb = rx_buf->skb;
630 prefetch(skb);
631
632 if (!CQE_TYPE_FAST(cqe_fp_type)) {
633#ifdef BNX2X_STOP_ON_ERROR
634 /* sanity check */
635 if (fp->disable_tpa &&
636 (CQE_TYPE_START(cqe_fp_type) ||
637 CQE_TYPE_STOP(cqe_fp_type)))
638 BNX2X_ERR("START/STOP packet while "
639 "disable_tpa type %x\n",
640 CQE_TYPE(cqe_fp_type));
641#endif
642
643 if (CQE_TYPE_START(cqe_fp_type)) {
644 u16 queue = cqe_fp->queue_index;
645 DP(NETIF_MSG_RX_STATUS,
646 "calling tpa_start on queue %d\n",
647 queue);
648
649 bnx2x_tpa_start(fp, queue, skb,
650 bd_cons, bd_prod,
651 cqe_fp);
652
653 /* Set Toeplitz hash for LRO skb */
654 bnx2x_set_skb_rxhash(bp, cqe, skb);
655
656 goto next_rx;
657
658 } else {
659 u16 queue =
660 cqe->end_agg_cqe.queue_index;
661 DP(NETIF_MSG_RX_STATUS,
662 "calling tpa_stop on queue %d\n",
663 queue);
664
665 bnx2x_tpa_stop(bp, fp, queue,
666 &cqe->end_agg_cqe,
667 comp_ring_cons);
668#ifdef BNX2X_STOP_ON_ERROR
669 if (bp->panic)
670 return 0;
671#endif
672
673 bnx2x_update_sge_prod(fp, cqe_fp);
674 goto next_cqe;
675 }
676 }
677 /* non TPA */
678 len = le16_to_cpu(cqe_fp->pkt_len);
679 pad = cqe_fp->placement_offset;
680 dma_sync_single_for_cpu(&bp->pdev->dev,
681 dma_unmap_addr(rx_buf, mapping),
682 pad + RX_COPY_THRESH,
683 DMA_FROM_DEVICE);
684 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
685
686 /* is this an error packet? */
687 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
688 DP(NETIF_MSG_RX_ERR,
689 "ERROR flags %x rx packet %u\n",
690 cqe_fp_flags, sw_comp_cons);
691 fp->eth_q_stats.rx_err_discard_pkt++;
692 goto reuse_rx;
693 }
694
695 /* Since we don't have a jumbo ring
696 * copy small packets if mtu > 1500
697 */
698 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
699 (len <= RX_COPY_THRESH)) {
700 struct sk_buff *new_skb;
701
702 new_skb = netdev_alloc_skb(bp->dev, len + pad);
703 if (new_skb == NULL) {
704 DP(NETIF_MSG_RX_ERR,
705 "ERROR packet dropped "
706 "because of alloc failure\n");
707 fp->eth_q_stats.rx_skb_alloc_failed++;
708 goto reuse_rx;
709 }
710
711 /* aligned copy */
712 skb_copy_from_linear_data_offset(skb, pad,
713 new_skb->data + pad, len);
714 skb_reserve(new_skb, pad);
715 skb_put(new_skb, len);
716
717 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
718
719 skb = new_skb;
720
721 } else
722 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
723 dma_unmap_single(&bp->pdev->dev,
724 dma_unmap_addr(rx_buf, mapping),
725 fp->rx_buf_size,
726 DMA_FROM_DEVICE);
727 skb_reserve(skb, pad);
728 skb_put(skb, len);
729
730 } else {
731 DP(NETIF_MSG_RX_ERR,
732 "ERROR packet dropped because "
733 "of alloc failure\n");
734 fp->eth_q_stats.rx_skb_alloc_failed++;
735reuse_rx:
736 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
737 goto next_rx;
738 }
739
740 skb->protocol = eth_type_trans(skb, bp->dev);
741
 742 /* Set Toeplitz hash for a non-LRO skb */
743 bnx2x_set_skb_rxhash(bp, cqe, skb);
744
745 skb_checksum_none_assert(skb);
746
747 if (bp->dev->features & NETIF_F_RXCSUM) {
748
749 if (likely(BNX2X_RX_CSUM_OK(cqe)))
750 skb->ip_summed = CHECKSUM_UNNECESSARY;
751 else
752 fp->eth_q_stats.hw_csum_err++;
753 }
754 }
755
756 skb_record_rx_queue(skb, fp->index);
757
758 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
759 PARSING_FLAGS_VLAN)
760 __vlan_hwaccel_put_tag(skb,
761 le16_to_cpu(cqe_fp->vlan_tag));
762 napi_gro_receive(&fp->napi, skb);
763
764
765next_rx:
766 rx_buf->skb = NULL;
767
768 bd_cons = NEXT_RX_IDX(bd_cons);
769 bd_prod = NEXT_RX_IDX(bd_prod);
770 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
771 rx_pkt++;
772next_cqe:
773 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
774 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
775
776 if (rx_pkt == budget)
777 break;
778 } /* while */
779
780 fp->rx_bd_cons = bd_cons;
781 fp->rx_bd_prod = bd_prod_fw;
782 fp->rx_comp_cons = sw_comp_cons;
783 fp->rx_comp_prod = sw_comp_prod;
784
785 /* Update producers */
786 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
787 fp->rx_sge_prod);
788
789 fp->rx_pkt += rx_pkt;
790 fp->rx_calls++;
791
792 return rx_pkt;
793}
794
795static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
796{
797 struct bnx2x_fastpath *fp = fp_cookie;
798 struct bnx2x *bp = fp->bp;
799 u8 cos;
800
801 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
802 "[fp %d fw_sd %d igusb %d]\n",
803 fp->index, fp->fw_sb_id, fp->igu_sb_id);
804 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
805
806#ifdef BNX2X_STOP_ON_ERROR
807 if (unlikely(bp->panic))
808 return IRQ_HANDLED;
809#endif
810
811 /* Handle Rx and Tx according to MSI-X vector */
812 prefetch(fp->rx_cons_sb);
813
814 for_each_cos_in_tx_queue(fp, cos)
815 prefetch(fp->txdata[cos].tx_cons_sb);
816
817 prefetch(&fp->sb_running_index[SM_RX_ID]);
818 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
819
820 return IRQ_HANDLED;
821}
822
823/* HW Lock for shared dual port PHYs */
824void bnx2x_acquire_phy_lock(struct bnx2x *bp)
825{
826 mutex_lock(&bp->port.phy_mutex);
827
828 if (bp->port.need_hw_lock)
829 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
830}
831
832void bnx2x_release_phy_lock(struct bnx2x *bp)
833{
834 if (bp->port.need_hw_lock)
835 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
836
837 mutex_unlock(&bp->port.phy_mutex);
838}
839
840/* calculates MF speed according to current linespeed and MF configuration */
841u16 bnx2x_get_mf_speed(struct bnx2x *bp)
842{
843 u16 line_speed = bp->link_vars.line_speed;
844 if (IS_MF(bp)) {
845 u16 maxCfg = bnx2x_extract_max_cfg(bp,
846 bp->mf_config[BP_VN(bp)]);
847
848 /* Calculate the current MAX line speed limit for the MF
849 * devices
850 */
851 if (IS_MF_SI(bp))
852 line_speed = (line_speed * maxCfg) / 100;
853 else { /* SD mode */
854 u16 vn_max_rate = maxCfg * 100;
855
856 if (vn_max_rate < line_speed)
857 line_speed = vn_max_rate;
858 }
859 }
860
861 return line_speed;
862}
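/* Example for bnx2x_get_mf_speed() (illustrative): on a 10000 Mbps link
 * with maxCfg = 30, MF-SI reports 10000 * 30 / 100 = 3000 Mbps, and SD
 * mode caps the speed at vn_max_rate = 30 * 100 = 3000 Mbps.
 */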
863
864/**
865 * bnx2x_fill_report_data - fill link report data to report
866 *
867 * @bp: driver handle
868 * @data: link state to update
869 *
 870 * It uses non-atomic bit operations because it is called under the mutex.
871 */
872static inline void bnx2x_fill_report_data(struct bnx2x *bp,
873 struct bnx2x_link_report_data *data)
874{
875 u16 line_speed = bnx2x_get_mf_speed(bp);
876
877 memset(data, 0, sizeof(*data));
878
 879 /* Fill the report data: effective line speed */
880 data->line_speed = line_speed;
881
882 /* Link is down */
883 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
884 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
885 &data->link_report_flags);
886
887 /* Full DUPLEX */
888 if (bp->link_vars.duplex == DUPLEX_FULL)
889 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
890
891 /* Rx Flow Control is ON */
892 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
893 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
894
895 /* Tx Flow Control is ON */
896 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
897 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
898}
899
900/**
901 * bnx2x_link_report - report link status to OS.
902 *
903 * @bp: driver handle
904 *
905 * Calls the __bnx2x_link_report() under the same locking scheme
906 * as a link/PHY state managing code to ensure a consistent link
907 * reporting.
908 */
910void bnx2x_link_report(struct bnx2x *bp)
911{
912 bnx2x_acquire_phy_lock(bp);
913 __bnx2x_link_report(bp);
914 bnx2x_release_phy_lock(bp);
915}
916
917/**
918 * __bnx2x_link_report - report link status to OS.
919 *
920 * @bp: driver handle
921 *
 922 * Non-atomic implementation.
923 * Should be called under the phy_lock.
924 */
925void __bnx2x_link_report(struct bnx2x *bp)
926{
927 struct bnx2x_link_report_data cur_data;
928
929 /* reread mf_cfg */
930 if (!CHIP_IS_E1(bp))
931 bnx2x_read_mf_cfg(bp);
932
933 /* Read the current link report info */
934 bnx2x_fill_report_data(bp, &cur_data);
935
936 /* Don't report link down or exactly the same link status twice */
937 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
938 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
939 &bp->last_reported_link.link_report_flags) &&
940 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
941 &cur_data.link_report_flags)))
942 return;
943
944 bp->link_cnt++;
945
 946 /* We are going to report new link parameters now -
947 * remember the current data for the next time.
948 */
949 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
950
951 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
952 &cur_data.link_report_flags)) {
953 netif_carrier_off(bp->dev);
954 netdev_err(bp->dev, "NIC Link is Down\n");
955 return;
956 } else {
957 netif_carrier_on(bp->dev);
958 netdev_info(bp->dev, "NIC Link is Up, ");
959 pr_cont("%d Mbps ", cur_data.line_speed);
960
961 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
962 &cur_data.link_report_flags))
963 pr_cont("full duplex");
964 else
965 pr_cont("half duplex");
966
 967 /* Handle the FC at the end so that only these flags can
 968 * possibly be set. This way we can easily check whether FC
 969 * is enabled.
970 */
971 if (cur_data.link_report_flags) {
972 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
973 &cur_data.link_report_flags)) {
974 pr_cont(", receive ");
975 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
976 &cur_data.link_report_flags))
977 pr_cont("& transmit ");
978 } else {
979 pr_cont(", transmit ");
980 }
981 pr_cont("flow control ON");
982 }
983 pr_cont("\n");
984 }
985}
986
987void bnx2x_init_rx_rings(struct bnx2x *bp)
988{
989 int func = BP_FUNC(bp);
990 u16 ring_prod;
991 int i, j;
992
993 /* Allocate TPA resources */
994 for_each_rx_queue(bp, j) {
995 struct bnx2x_fastpath *fp = &bp->fp[j];
996
997 DP(NETIF_MSG_IFUP,
998 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
999
1000 if (!fp->disable_tpa) {
 1001 /* Fill the per-aggregation pool */
1002 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1003 struct bnx2x_agg_info *tpa_info =
1004 &fp->tpa_info[i];
1005 struct sw_rx_bd *first_buf =
1006 &tpa_info->first_buf;
1007
1008 first_buf->skb = netdev_alloc_skb(bp->dev,
1009 fp->rx_buf_size);
1010 if (!first_buf->skb) {
1011 BNX2X_ERR("Failed to allocate TPA "
1012 "skb pool for queue[%d] - "
1013 "disabling TPA on this "
1014 "queue!\n", j);
1015 bnx2x_free_tpa_pool(bp, fp, i);
1016 fp->disable_tpa = 1;
1017 break;
1018 }
1019 dma_unmap_addr_set(first_buf, mapping, 0);
1020 tpa_info->tpa_state = BNX2X_TPA_STOP;
1021 }
1022
1023 /* "next page" elements initialization */
1024 bnx2x_set_next_page_sgl(fp);
1025
1026 /* set SGEs bit mask */
1027 bnx2x_init_sge_ring_bit_mask(fp);
1028
1029 /* Allocate SGEs and initialize the ring elements */
1030 for (i = 0, ring_prod = 0;
1031 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1032
1033 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1034 BNX2X_ERR("was only able to allocate "
1035 "%d rx sges\n", i);
1036 BNX2X_ERR("disabling TPA for "
1037 "queue[%d]\n", j);
1038 /* Cleanup already allocated elements */
1039 bnx2x_free_rx_sge_range(bp, fp,
1040 ring_prod);
1041 bnx2x_free_tpa_pool(bp, fp,
1042 MAX_AGG_QS(bp));
1043 fp->disable_tpa = 1;
1044 ring_prod = 0;
1045 break;
1046 }
1047 ring_prod = NEXT_SGE_IDX(ring_prod);
1048 }
1049
1050 fp->rx_sge_prod = ring_prod;
1051 }
1052 }
1053
1054 for_each_rx_queue(bp, j) {
1055 struct bnx2x_fastpath *fp = &bp->fp[j];
1056
1057 fp->rx_bd_cons = 0;
1058
1059 /* Activate BD ring */
1060 /* Warning!
1061 * this will generate an interrupt (to the TSTORM)
1062 * must only be done after chip is initialized
1063 */
1064 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1065 fp->rx_sge_prod);
1066
1067 if (j != 0)
1068 continue;
1069
1070 if (CHIP_IS_E1(bp)) {
1071 REG_WR(bp, BAR_USTRORM_INTMEM +
1072 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1073 U64_LO(fp->rx_comp_mapping));
1074 REG_WR(bp, BAR_USTRORM_INTMEM +
1075 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1076 U64_HI(fp->rx_comp_mapping));
1077 }
1078 }
1079}
1080
1081static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1082{
1083 int i;
1084 u8 cos;
1085
1086 for_each_tx_queue(bp, i) {
1087 struct bnx2x_fastpath *fp = &bp->fp[i];
1088 for_each_cos_in_tx_queue(fp, cos) {
1089 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
1090
1091 u16 bd_cons = txdata->tx_bd_cons;
1092 u16 sw_prod = txdata->tx_pkt_prod;
1093 u16 sw_cons = txdata->tx_pkt_cons;
1094
1095 while (sw_cons != sw_prod) {
1096 bd_cons = bnx2x_free_tx_pkt(bp, txdata,
1097 TX_BD(sw_cons));
1098 sw_cons++;
1099 }
1100 }
1101 }
1102}
1103
1104static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1105{
1106 struct bnx2x *bp = fp->bp;
1107 int i;
1108
1109 /* ring wasn't allocated */
1110 if (fp->rx_buf_ring == NULL)
1111 return;
1112
1113 for (i = 0; i < NUM_RX_BD; i++) {
1114 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1115 struct sk_buff *skb = rx_buf->skb;
1116
1117 if (skb == NULL)
1118 continue;
1119 dma_unmap_single(&bp->pdev->dev,
1120 dma_unmap_addr(rx_buf, mapping),
1121 fp->rx_buf_size, DMA_FROM_DEVICE);
1122
1123 rx_buf->skb = NULL;
1124 dev_kfree_skb(skb);
1125 }
1126}
1127
1128static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1129{
1130 int j;
1131
1132 for_each_rx_queue(bp, j) {
1133 struct bnx2x_fastpath *fp = &bp->fp[j];
1134
1135 bnx2x_free_rx_bds(fp);
1136
1137 if (!fp->disable_tpa)
1138 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1139 }
1140}
1141
1142void bnx2x_free_skbs(struct bnx2x *bp)
1143{
1144 bnx2x_free_tx_skbs(bp);
1145 bnx2x_free_rx_skbs(bp);
1146}
1147
1148void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1149{
1150 /* load old values */
1151 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1152
1153 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1154 /* leave all but MAX value */
1155 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1156
1157 /* set new MAX value */
1158 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1159 & FUNC_MF_CFG_MAX_BW_MASK;
1160
1161 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1162 }
1163}
1164
1165/**
1166 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1167 *
1168 * @bp: driver handle
1169 * @nvecs: number of vectors to be released
1170 */
1171static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1172{
1173 int i, offset = 0;
1174
1175 if (nvecs == offset)
1176 return;
1177 free_irq(bp->msix_table[offset].vector, bp->dev);
1178 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1179 bp->msix_table[offset].vector);
1180 offset++;
1181#ifdef BCM_CNIC
1182 if (nvecs == offset)
1183 return;
1184 offset++;
1185#endif
1186
1187 for_each_eth_queue(bp, i) {
1188 if (nvecs == offset)
1189 return;
1190 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1191 "irq\n", i, bp->msix_table[offset].vector);
1192
1193 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1194 }
1195}
1196
1197void bnx2x_free_irq(struct bnx2x *bp)
1198{
1199 if (bp->flags & USING_MSIX_FLAG)
1200 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1201 CNIC_PRESENT + 1);
 1202 else
 1203 /* MSI and INTx use the same PCI irq */
 1204 free_irq(bp->pdev->irq, bp->dev);
1206}
1207
1208int bnx2x_enable_msix(struct bnx2x *bp)
1209{
1210 int msix_vec = 0, i, rc, req_cnt;
1211
1212 bp->msix_table[msix_vec].entry = msix_vec;
1213 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1214 bp->msix_table[0].entry);
1215 msix_vec++;
1216
1217#ifdef BCM_CNIC
1218 bp->msix_table[msix_vec].entry = msix_vec;
1219 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1220 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1221 msix_vec++;
1222#endif
1223 /* We need separate vectors for ETH queues only (not FCoE) */
1224 for_each_eth_queue(bp, i) {
1225 bp->msix_table[msix_vec].entry = msix_vec;
1226 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1227 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1228 msix_vec++;
1229 }
1230
1231 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
1232
1233 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1234
1235 /*
1236 * reconfigure number of tx/rx queues according to available
1237 * MSI-X vectors
1238 */
1239 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
 1240 /* how many fewer vectors will we have? */
1241 int diff = req_cnt - rc;
1242
1243 DP(NETIF_MSG_IFUP,
1244 "Trying to use less MSI-X vectors: %d\n", rc);
1245
1246 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1247
1248 if (rc) {
1249 DP(NETIF_MSG_IFUP,
1250 "MSI-X is not attainable rc %d\n", rc);
1251 return rc;
1252 }
1253 /*
1254 * decrease number of queues by number of unallocated entries
1255 */
1256 bp->num_queues -= diff;
1257
1258 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1259 bp->num_queues);
1260 } else if (rc) {
 1261 /* fall back to INTx if not enough memory */
1262 if (rc == -ENOMEM)
1263 bp->flags |= DISABLE_MSI_FLAG;
1264 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1265 return rc;
1266 }
1267
1268 bp->flags |= USING_MSIX_FLAG;
1269
1270 return 0;
1271}
1272
1273static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1274{
1275 int i, rc, offset = 0;
1276
1277 rc = request_irq(bp->msix_table[offset++].vector,
1278 bnx2x_msix_sp_int, 0,
1279 bp->dev->name, bp->dev);
1280 if (rc) {
1281 BNX2X_ERR("request sp irq failed\n");
1282 return -EBUSY;
1283 }
1284
1285#ifdef BCM_CNIC
1286 offset++;
1287#endif
1288 for_each_eth_queue(bp, i) {
1289 struct bnx2x_fastpath *fp = &bp->fp[i];
1290 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1291 bp->dev->name, i);
1292
1293 rc = request_irq(bp->msix_table[offset].vector,
1294 bnx2x_msix_fp_int, 0, fp->name, fp);
1295 if (rc) {
1296 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1297 bp->msix_table[offset].vector, rc);
1298 bnx2x_free_msix_irqs(bp, offset);
1299 return -EBUSY;
1300 }
1301
1302 offset++;
1303 }
1304
1305 i = BNX2X_NUM_ETH_QUEUES(bp);
1306 offset = 1 + CNIC_PRESENT;
1307 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1308 " ... fp[%d] %d\n",
1309 bp->msix_table[0].vector,
1310 0, bp->msix_table[offset].vector,
1311 i - 1, bp->msix_table[offset + i - 1].vector);
1312
1313 return 0;
1314}
1315
1316int bnx2x_enable_msi(struct bnx2x *bp)
1317{
1318 int rc;
1319
1320 rc = pci_enable_msi(bp->pdev);
1321 if (rc) {
1322 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1323 return -1;
1324 }
1325 bp->flags |= USING_MSI_FLAG;
1326
1327 return 0;
1328}
1329
1330static int bnx2x_req_irq(struct bnx2x *bp)
1331{
1332 unsigned long flags;
1333 int rc;
1334
1335 if (bp->flags & USING_MSI_FLAG)
1336 flags = 0;
1337 else
1338 flags = IRQF_SHARED;
1339
1340 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1341 bp->dev->name, bp->dev);
1342 return rc;
1343}
1344
1345static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1346{
1347 int rc = 0;
1348 if (bp->flags & USING_MSIX_FLAG) {
1349 rc = bnx2x_req_msix_irqs(bp);
1350 if (rc)
1351 return rc;
1352 } else {
1353 bnx2x_ack_int(bp);
1354 rc = bnx2x_req_irq(bp);
1355 if (rc) {
1356 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1357 return rc;
1358 }
1359 if (bp->flags & USING_MSI_FLAG) {
1360 bp->dev->irq = bp->pdev->irq;
1361 netdev_info(bp->dev, "using MSI IRQ %d\n",
1362 bp->pdev->irq);
1363 }
1364 }
1365
1366 return 0;
1367}
1368
1369static inline void bnx2x_napi_enable(struct bnx2x *bp)
1370{
1371 int i;
1372
1373 for_each_rx_queue(bp, i)
1374 napi_enable(&bnx2x_fp(bp, i, napi));
1375}
1376
1377static inline void bnx2x_napi_disable(struct bnx2x *bp)
1378{
1379 int i;
1380
1381 for_each_rx_queue(bp, i)
1382 napi_disable(&bnx2x_fp(bp, i, napi));
1383}
1384
1385void bnx2x_netif_start(struct bnx2x *bp)
1386{
1387 if (netif_running(bp->dev)) {
1388 bnx2x_napi_enable(bp);
1389 bnx2x_int_enable(bp);
1390 if (bp->state == BNX2X_STATE_OPEN)
1391 netif_tx_wake_all_queues(bp->dev);
1392 }
1393}
1394
1395void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1396{
1397 bnx2x_int_disable_sync(bp, disable_hw);
1398 bnx2x_napi_disable(bp);
1399}
1400
1401u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1402{
1403 struct bnx2x *bp = netdev_priv(dev);
1404
1405#ifdef BCM_CNIC
1406 if (!NO_FCOE(bp)) {
1407 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1408 u16 ether_type = ntohs(hdr->h_proto);
1409
1410 /* Skip VLAN tag if present */
1411 if (ether_type == ETH_P_8021Q) {
1412 struct vlan_ethhdr *vhdr =
1413 (struct vlan_ethhdr *)skb->data;
1414
1415 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1416 }
1417
1418 /* If ethertype is FCoE or FIP - use FCoE ring */
1419 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1420 return bnx2x_fcoe_tx(bp, txq_index);
1421 }
1422#endif
1423 /* select a non-FCoE queue */
1424 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1425}
1426
1427void bnx2x_set_num_queues(struct bnx2x *bp)
1428{
1429 switch (bp->multi_mode) {
1430 case ETH_RSS_MODE_DISABLED:
1431 bp->num_queues = 1;
1432 break;
1433 case ETH_RSS_MODE_REGULAR:
1434 bp->num_queues = bnx2x_calc_num_queues(bp);
1435 break;
1436
1437 default:
1438 bp->num_queues = 1;
1439 break;
1440 }
1441
1442 /* Add special queues */
1443 bp->num_queues += NON_ETH_CONTEXT_USE;
1444}
1445
1446/**
1447 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1448 *
1449 * @bp: Driver handle
1450 *
 1451 * We currently support at most 16 Tx queues for each CoS, thus we will
1452 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1453 * bp->max_cos.
1454 *
1455 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1456 * index after all ETH L2 indices.
1457 *
1458 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1459 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 1460 * 16..31,...) with indices that are not coupled with any real Tx queue.
1461 *
1462 * The proper configuration of skb->queue_mapping is handled by
1463 * bnx2x_select_queue() and __skb_tx_hash().
1464 *
1465 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1466 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1467 */
1468static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1469{
1470 int rc, tx, rx;
1471
1472 tx = MAX_TXQS_PER_COS * bp->max_cos;
1473 rx = BNX2X_NUM_ETH_QUEUES(bp);
1474
1475/* account for fcoe queue */
1476#ifdef BCM_CNIC
1477 if (!NO_FCOE(bp)) {
1478 rx += FCOE_PRESENT;
1479 tx += FCOE_PRESENT;
1480 }
1481#endif
1482
1483 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1484 if (rc) {
1485 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1486 return rc;
1487 }
1488 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1489 if (rc) {
1490 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1491 return rc;
1492 }
1493
1494 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1495 tx, rx);
1496
1497 return rc;
1498}
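/* Example (illustrative, assuming MAX_TXQS_PER_COS is 16 per the comment
 * above): with max_cos = 3 and 8 ETH queues (no FCoE), tx = 16 * 3 = 48
 * and rx = 8; only indices 0..7, 16..23 and 32..39 map to real Tx rings,
 * the rest of each group of 16 are holes.
 */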
1499
1500static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1501{
1502 int i;
1503
1504 for_each_queue(bp, i) {
1505 struct bnx2x_fastpath *fp = &bp->fp[i];
1506
1507 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1508 if (IS_FCOE_IDX(i))
1509 /*
1510 * Although there are no IP frames expected to arrive to
1511 * this ring we still want to add an
1512 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1513 * overrun attack.
1514 */
1515 fp->rx_buf_size =
1516 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1517 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1518 else
1519 fp->rx_buf_size =
1520 bp->dev->mtu + ETH_OVREHEAD +
1521 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1522 }
1523}
1524
1525static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1526{
1527 int i;
1528 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1529 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1530
1531 /*
 1532 * Prepare the initial contents of the indirection table if RSS is
1533 * enabled
1534 */
1535 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1536 for (i = 0; i < sizeof(ind_table); i++)
1537 ind_table[i] =
1538 bp->fp->cl_id + (i % num_eth_queues);
1539 }
1540
1541 /*
1542 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1543 * per-port, so if explicit configuration is needed, do it only
1544 * for a PMF.
1545 *
1546 * For 57712 and newer on the other hand it's a per-function
1547 * configuration.
1548 */
1549 return bnx2x_config_rss_pf(bp, ind_table,
1550 bp->port.pmf || !CHIP_IS_E1x(bp));
1551}
1552
1553int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1554{
1555 struct bnx2x_config_rss_params params = {0};
1556 int i;
1557
1558 /* Although RSS is meaningless when there is a single HW queue we
1559 * still need it enabled in order to have HW Rx hash generated.
1560 *
1561 * if (!is_eth_multi(bp))
1562 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1563 */
1564
1565 params.rss_obj = &bp->rss_conf_obj;
1566
1567 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1568
1569 /* RSS mode */
1570 switch (bp->multi_mode) {
1571 case ETH_RSS_MODE_DISABLED:
1572 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1573 break;
1574 case ETH_RSS_MODE_REGULAR:
1575 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1576 break;
1577 case ETH_RSS_MODE_VLAN_PRI:
1578 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1579 break;
1580 case ETH_RSS_MODE_E1HOV_PRI:
1581 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1582 break;
1583 case ETH_RSS_MODE_IP_DSCP:
1584 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1585 break;
1586 default:
1587 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1588 return -EINVAL;
1589 }
1590
1591 /* If RSS is enabled */
1592 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1593 /* RSS configuration */
1594 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1595 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1596 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1597 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1598
1599 /* Hash bits */
1600 params.rss_result_mask = MULTI_MASK;
1601
1602 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1603
1604 if (config_hash) {
1605 /* RSS keys */
1606 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1607 params.rss_key[i] = random32();
1608
1609 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1610 }
1611 }
1612
1613 return bnx2x_config_rss(bp, &params);
1614}
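A standalone sketch of the round-robin indirection-table fill performed in bnx2x_init_rss_pf() above: entry i steers RSS hash bucket i to one of the ETH clients. The table size stands in for T_ETH_INDIRECTION_TABLE_SIZE and the base client id is a made-up value.

#include <stdio.h>

#define SKETCH_IND_TABLE_SIZE 128	/* assumed table size */

int main(void)
{
	unsigned char ind_table[SKETCH_IND_TABLE_SIZE];
	unsigned char base_cl_id = 16;	/* hypothetical cl_id of fp[0] */
	int num_eth_queues = 6, i;

	/* round-robin fill, as in bnx2x_init_rss_pf() */
	for (i = 0; i < SKETCH_IND_TABLE_SIZE; i++)
		ind_table[i] = base_cl_id + (i % num_eth_queues);

	for (i = 0; i < 8; i++)
		printf("bucket %3d -> cl_id %u\n", i, ind_table[i]);
	return 0;
}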
1615
1616static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1617{
1618 struct bnx2x_func_state_params func_params = {0};
1619
1620 /* Prepare parameters for function state transitions */
1621 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1622
1623 func_params.f_obj = &bp->func_obj;
1624 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1625
1626 func_params.params.hw_init.load_phase = load_code;
1627
1628 return bnx2x_func_state_change(bp, &func_params);
1629}
1630
1631/*
1632 * Cleans the objects that have internal lists, without sending
1633 * ramrods. Should be run when interrupts are disabled.
1634 */
1635static void bnx2x_squeeze_objects(struct bnx2x *bp)
1636{
1637 int rc;
1638 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1639 struct bnx2x_mcast_ramrod_params rparam = {0};
1640 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1641
1642 /***************** Cleanup MACs' object first *************************/
1643
1644	/* Wait for completion of requested commands */
1645 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1646 /* Perform a dry cleanup */
1647 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1648
1649 /* Clean ETH primary MAC */
1650 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1651 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1652 &ramrod_flags);
1653 if (rc != 0)
1654 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1655
1656 /* Cleanup UC list */
1657 vlan_mac_flags = 0;
1658 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1659 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1660 &ramrod_flags);
1661 if (rc != 0)
1662 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1663
1664 /***************** Now clean mcast object *****************************/
1665 rparam.mcast_obj = &bp->mcast_obj;
1666 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1667
1668 /* Add a DEL command... */
1669 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1670 if (rc < 0)
1671 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1672 "object: %d\n", rc);
1673
1674 /* ...and wait until all pending commands are cleared */
1675 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1676 while (rc != 0) {
1677 if (rc < 0) {
1678 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1679 rc);
1680 return;
1681 }
1682
1683 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1684 }
1685}
1686
1687#ifndef BNX2X_STOP_ON_ERROR
1688#define LOAD_ERROR_EXIT(bp, label) \
1689 do { \
1690 (bp)->state = BNX2X_STATE_ERROR; \
1691 goto label; \
1692 } while (0)
1693#else
1694#define LOAD_ERROR_EXIT(bp, label) \
1695 do { \
1696 (bp)->state = BNX2X_STATE_ERROR; \
1697 (bp)->panic = 1; \
1698 return -EBUSY; \
1699 } while (0)
1700#endif
1701
1702/* must be called with rtnl_lock */
1703int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1704{
1705 int port = BP_PORT(bp);
1706 u32 load_code;
1707 int i, rc;
1708
1709#ifdef BNX2X_STOP_ON_ERROR
1710 if (unlikely(bp->panic))
1711 return -EPERM;
1712#endif
1713
1714 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1715
1716 /* Set the initial link reported state to link down */
1717 bnx2x_acquire_phy_lock(bp);
1718 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1719 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1720 &bp->last_reported_link.link_report_flags);
1721 bnx2x_release_phy_lock(bp);
1722
1723 /* must be called before memory allocation and HW init */
1724 bnx2x_ilt_set_info(bp);
1725
1726 /*
1727	 * Zero the fastpath structures, preserving invariants that are allocated
1728	 * only once (napi), as well as the fp index, max_cos and the bp pointer.
1729 * Also set fp->disable_tpa.
1730 */
1731 for_each_queue(bp, i)
1732 bnx2x_bz_fp(bp, i);
1733
1734
1735 /* Set the receive queues buffer size */
1736 bnx2x_set_rx_buf_size(bp);
1737
1738 if (bnx2x_alloc_mem(bp))
1739 return -ENOMEM;
1740
1741 /* As long as bnx2x_alloc_mem() may possibly update
1742 * bp->num_queues, bnx2x_set_real_num_queues() should always
1743 * come after it.
1744 */
1745 rc = bnx2x_set_real_num_queues(bp);
1746 if (rc) {
1747 BNX2X_ERR("Unable to set real_num_queues\n");
1748 LOAD_ERROR_EXIT(bp, load_error0);
1749 }
1750
1751	/* Configure multi-COS mappings in the kernel.
1752	 * This configuration may be overridden by a multi-class queue
1753	 * discipline or by a DCBX negotiation result.
1754 */
1755 bnx2x_setup_tc(bp->dev, bp->max_cos);
1756
1757 bnx2x_napi_enable(bp);
1758
1759	/* Send the LOAD_REQUEST command to the MCP.
1760	 * The response indicates the type of LOAD to perform:
1761	 * if this is the first port to be initialized,
1762	 * the common blocks must be initialized as well - otherwise not.
1763 */
1764 if (!BP_NOMCP(bp)) {
1765 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1766 if (!load_code) {
1767 BNX2X_ERR("MCP response failure, aborting\n");
1768 rc = -EBUSY;
1769 LOAD_ERROR_EXIT(bp, load_error1);
1770 }
1771 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1772 rc = -EBUSY; /* other port in diagnostic mode */
1773 LOAD_ERROR_EXIT(bp, load_error1);
1774 }
1775
1776 } else {
1777 int path = BP_PATH(bp);
1778
1779 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1780 path, load_count[path][0], load_count[path][1],
1781 load_count[path][2]);
1782 load_count[path][0]++;
1783 load_count[path][1 + port]++;
1784 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1785 path, load_count[path][0], load_count[path][1],
1786 load_count[path][2]);
1787 if (load_count[path][0] == 1)
1788 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1789 else if (load_count[path][1 + port] == 1)
1790 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1791 else
1792 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1793 }
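	/* Worked example of the no-MCP load accounting above (illustrative
	 * numbers): on a freshly reset path, load_count[path] is {0, 0, 0}.
	 * The first function to load, say on port 0, bumps it to {1, 1, 0}
	 * and, since load_count[path][0] == 1, gets LOAD_COMMON.  A later
	 * function on port 1 sees {2, 1, 1} and, since load_count[path][2]
	 * == 1, gets LOAD_PORT.  Any function loading after that on either
	 * port only gets LOAD_FUNCTION.
	 */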
1794
1795 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1796 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1797 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
1798 bp->port.pmf = 1;
1799 /*
1800 * We need the barrier to ensure the ordering between the
1801 * writing to bp->port.pmf here and reading it from the
1802 * bnx2x_periodic_task().
1803 */
1804 smp_mb();
1805 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1806 } else
1807 bp->port.pmf = 0;
1808
1809 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1810
1811 /* Init Function state controlling object */
1812 bnx2x__init_func_obj(bp);
1813
1814 /* Initialize HW */
1815 rc = bnx2x_init_hw(bp, load_code);
1816 if (rc) {
1817 BNX2X_ERR("HW init failed, aborting\n");
1818 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1819 LOAD_ERROR_EXIT(bp, load_error2);
1820 }
1821
1822 /* Connect to IRQs */
1823 rc = bnx2x_setup_irqs(bp);
1824 if (rc) {
1825 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1826 LOAD_ERROR_EXIT(bp, load_error2);
1827 }
1828
1829 /* Setup NIC internals and enable interrupts */
1830 bnx2x_nic_init(bp, load_code);
1831
1832 /* Init per-function objects */
1833 bnx2x_init_bp_objs(bp);
1834
1835 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1836 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1837 (bp->common.shmem2_base)) {
1838 if (SHMEM2_HAS(bp, dcc_support))
1839 SHMEM2_WR(bp, dcc_support,
1840 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1841 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1842 }
1843
1844 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1845 rc = bnx2x_func_start(bp);
1846 if (rc) {
1847 BNX2X_ERR("Function start failed!\n");
1848 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1849 LOAD_ERROR_EXIT(bp, load_error3);
1850 }
1851
1852 /* Send LOAD_DONE command to MCP */
1853 if (!BP_NOMCP(bp)) {
1854 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1855 if (!load_code) {
1856 BNX2X_ERR("MCP response failure, aborting\n");
1857 rc = -EBUSY;
1858 LOAD_ERROR_EXIT(bp, load_error3);
1859 }
1860 }
1861
1862 rc = bnx2x_setup_leading(bp);
1863 if (rc) {
1864 BNX2X_ERR("Setup leading failed!\n");
1865 LOAD_ERROR_EXIT(bp, load_error3);
1866 }
1867
1868#ifdef BCM_CNIC
1869 /* Enable Timer scan */
1870 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
1871#endif
1872
1873 for_each_nondefault_queue(bp, i) {
1874 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
1875 if (rc)
1876 LOAD_ERROR_EXIT(bp, load_error4);
1877 }
1878
1879 rc = bnx2x_init_rss_pf(bp);
1880 if (rc)
1881 LOAD_ERROR_EXIT(bp, load_error4);
1882
1883	/* Now that the clients are configured, we are ready to work */
1884 bp->state = BNX2X_STATE_OPEN;
1885
1886 /* Configure a ucast MAC */
1887 rc = bnx2x_set_eth_mac(bp, true);
1888 if (rc)
1889 LOAD_ERROR_EXIT(bp, load_error4);
1890
1891 if (bp->pending_max) {
1892 bnx2x_update_max_mf_config(bp, bp->pending_max);
1893 bp->pending_max = 0;
1894 }
1895
1896 if (bp->port.pmf)
1897 bnx2x_initial_phy_init(bp, load_mode);
1898
1899 /* Start fast path */
1900
1901 /* Initialize Rx filter. */
1902 netif_addr_lock_bh(bp->dev);
1903 bnx2x_set_rx_mode(bp->dev);
1904 netif_addr_unlock_bh(bp->dev);
1905
1906 /* Start the Tx */
1907 switch (load_mode) {
1908 case LOAD_NORMAL:
1909		/* Tx queues should only be re-enabled */
1910 netif_tx_wake_all_queues(bp->dev);
1911 break;
1912
1913 case LOAD_OPEN:
1914 netif_tx_start_all_queues(bp->dev);
1915 smp_mb__after_clear_bit();
1916 break;
1917
1918 case LOAD_DIAG:
1919 bp->state = BNX2X_STATE_DIAG;
1920 break;
1921
1922 default:
1923 break;
1924 }
1925
1926 if (!bp->port.pmf)
1927 bnx2x__link_status_update(bp);
1928
1929 /* start the timer */
1930 mod_timer(&bp->timer, jiffies + bp->current_interval);
1931
1932#ifdef BCM_CNIC
1933 bnx2x_setup_cnic_irq_info(bp);
1934 if (bp->state == BNX2X_STATE_OPEN)
1935 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1936#endif
1937 bnx2x_inc_load_cnt(bp);
1938
1939 /* Wait for all pending SP commands to complete */
1940 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1941 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1942 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1943 return -EBUSY;
1944 }
1945
1946 bnx2x_dcbx_init(bp);
1947 return 0;
1948
1949#ifndef BNX2X_STOP_ON_ERROR
1950load_error4:
1951#ifdef BCM_CNIC
1952 /* Disable Timer scan */
1953 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
1954#endif
1955load_error3:
1956 bnx2x_int_disable_sync(bp, 1);
1957
1958 /* Clean queueable objects */
1959 bnx2x_squeeze_objects(bp);
1960
1961 /* Free SKBs, SGEs, TPA pool and driver internals */
1962 bnx2x_free_skbs(bp);
1963 for_each_rx_queue(bp, i)
1964 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1965
1966 /* Release IRQs */
1967 bnx2x_free_irq(bp);
1968load_error2:
1969 if (!BP_NOMCP(bp)) {
1970 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1971 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1972 }
1973
1974 bp->port.pmf = 0;
1975load_error1:
1976 bnx2x_napi_disable(bp);
1977load_error0:
1978 bnx2x_free_mem(bp);
1979
1980 return rc;
1981#endif /* ! BNX2X_STOP_ON_ERROR */
1982}
1983
1984/* must be called with rtnl_lock */
1985int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1986{
1987 int i;
1988 bool global = false;
1989
1990 if ((bp->state == BNX2X_STATE_CLOSED) ||
1991 (bp->state == BNX2X_STATE_ERROR)) {
1992 /* We can get here if the driver has been unloaded
1993 * during parity error recovery and is either waiting for a
1994 * leader to complete or for other functions to unload and
1995 * then ifdown has been issued. In this case we want to
1996		 * unload and let other functions complete the recovery
1997		 * process.
1998 */
1999 bp->recovery_state = BNX2X_RECOVERY_DONE;
2000 bp->is_leader = 0;
2001 bnx2x_release_leader_lock(bp);
2002 smp_mb();
2003
2004 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
2005
2006 return -EINVAL;
2007 }
2008
2009 /*
2010	 * It's important to set bp->state to a value different from
2011 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2012 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2013 */
2014 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2015 smp_mb();
2016
2017 /* Stop Tx */
2018 bnx2x_tx_disable(bp);
2019
2020#ifdef BCM_CNIC
2021 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2022#endif
2023
2024 bp->rx_mode = BNX2X_RX_MODE_NONE;
2025
2026 del_timer_sync(&bp->timer);
2027
2028 /* Set ALWAYS_ALIVE bit in shmem */
2029 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2030
2031 bnx2x_drv_pulse(bp);
2032
2033 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2034
2035 /* Cleanup the chip if needed */
2036 if (unload_mode != UNLOAD_RECOVERY)
2037 bnx2x_chip_cleanup(bp, unload_mode);
2038 else {
2039 /* Send the UNLOAD_REQUEST to the MCP */
2040 bnx2x_send_unload_req(bp, unload_mode);
2041
2042 /*
2043		 * Prevent transactions to the host from the functions on the
2044		 * engine that doesn't reset global blocks in case of a global
2045		 * attention, once the global blocks are reset and the gates are
2046		 * opened (the engine whose leader will perform the recovery
2047		 * last).
2048 */
2049 if (!CHIP_IS_E1x(bp))
2050 bnx2x_pf_disable(bp);
2051
2052 /* Disable HW interrupts, NAPI */
2053 bnx2x_netif_stop(bp, 1);
2054
2055 /* Release IRQs */
2056 bnx2x_free_irq(bp);
2057
2058 /* Report UNLOAD_DONE to MCP */
2059 bnx2x_send_unload_done(bp);
2060 }
2061
2062 /*
2063	 * At this stage no more interrupts will arrive, so we may safely clean
2064	 * the queueable objects here in case they failed to get cleaned so far.
2065 */
2066 bnx2x_squeeze_objects(bp);
2067
2068 /* There should be no more pending SP commands at this stage */
2069 bp->sp_state = 0;
2070
2071 bp->port.pmf = 0;
2072
2073 /* Free SKBs, SGEs, TPA pool and driver internals */
2074 bnx2x_free_skbs(bp);
2075 for_each_rx_queue(bp, i)
2076 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2077
2078 bnx2x_free_mem(bp);
2079
2080 bp->state = BNX2X_STATE_CLOSED;
2081
2082 /* Check if there are pending parity attentions. If there are - set
2083 * RECOVERY_IN_PROGRESS.
2084 */
2085 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2086 bnx2x_set_reset_in_progress(bp);
2087
2088 /* Set RESET_IS_GLOBAL if needed */
2089 if (global)
2090 bnx2x_set_reset_global(bp);
2091 }
2092
2093
2094	/* The last driver must disable the "close the gate" functionality if
2095	 * there is no parity attention or "process kill" pending.
2096 */
2097 if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2098 bnx2x_disable_close_the_gate(bp);
2099
2100 return 0;
2101}
2102
2103int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2104{
2105 u16 pmcsr;
2106
2107 /* If there is no power capability, silently succeed */
2108 if (!bp->pm_cap) {
2109 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2110 return 0;
2111 }
2112
2113 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2114
2115 switch (state) {
2116 case PCI_D0:
2117 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2118 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2119 PCI_PM_CTRL_PME_STATUS));
2120
2121 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2122 /* delay required during transition out of D3hot */
2123 msleep(20);
2124 break;
2125
2126 case PCI_D3hot:
2127		/* If there are other clients above, don't
2128		   shut down the power */
2129 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2130 return 0;
2131 /* Don't shut down the power for emulation and FPGA */
2132 if (CHIP_REV_IS_SLOW(bp))
2133 return 0;
2134
2135 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2136 pmcsr |= 3;
2137
2138 if (bp->wol)
2139 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2140
2141 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2142 pmcsr);
2143
2144 /* No more memory access after this point until
2145 * device is brought back to D0.
2146 */
2147 break;
2148
2149 default:
2150 return -EINVAL;
2151 }
2152 return 0;
2153}
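A small userspace sketch of the PMCSR manipulation above. The register readback is invented; PCI_PM_CTRL_STATE_MASK really is the low two bits of PMCSR, and writing 3 there requests D3hot, as the driver does.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t pmcsr = 0x0100;	/* hypothetical PMCSR readback (D0) */
	uint16_t state_mask = 0x0003;	/* PCI_PM_CTRL_STATE_MASK */

	/* clear the power-state field and request D3hot */
	pmcsr = (pmcsr & ~state_mask) | 3;
	printf("PMCSR = %#06x -> power state D%u\n", pmcsr,
	       pmcsr & state_mask);
	return 0;
}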
2154
2155/*
2156 * net_device service functions
2157 */
2158int bnx2x_poll(struct napi_struct *napi, int budget)
2159{
2160 int work_done = 0;
2161 u8 cos;
2162 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2163 napi);
2164 struct bnx2x *bp = fp->bp;
2165
2166 while (1) {
2167#ifdef BNX2X_STOP_ON_ERROR
2168 if (unlikely(bp->panic)) {
2169 napi_complete(napi);
2170 return 0;
2171 }
2172#endif
2173
2174 for_each_cos_in_tx_queue(fp, cos)
2175 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2176 bnx2x_tx_int(bp, &fp->txdata[cos]);
2177
2178
2179 if (bnx2x_has_rx_work(fp)) {
2180 work_done += bnx2x_rx_int(fp, budget - work_done);
2181
2182 /* must not complete if we consumed full budget */
2183 if (work_done >= budget)
2184 break;
2185 }
2186
2187 /* Fall out from the NAPI loop if needed */
2188 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2189#ifdef BCM_CNIC
2190 /* No need to update SB for FCoE L2 ring as long as
2191 * it's connected to the default SB and the SB
2192 * has been updated when NAPI was scheduled.
2193 */
2194 if (IS_FCOE_FP(fp)) {
2195 napi_complete(napi);
2196 break;
2197 }
2198#endif
2199
2200 bnx2x_update_fpsb_idx(fp);
2201			/* bnx2x_has_rx_work() reads the status block, so we
2202			 * must ensure that the status block indices have
2203			 * actually been read (bnx2x_update_fpsb_idx) before
2204			 * this check (bnx2x_has_rx_work).  Without the rmb,
2205			 * the read in bnx2x_update_fpsb_idx could be postponed
2206			 * until right before bnx2x_ack_sb; if a DMA happened
2207			 * right after bnx2x_has_rx_work, we would then write
2208			 * the "newer" value of the status block to the IGU
2209			 * while having checked for work against the older one.
2210			 * In that case there would never be another interrupt
2211			 * until the next status block update, while there is
2212			 * still unhandled work.
2213			 */
2214 rmb();
2215
2216 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2217 napi_complete(napi);
2218 /* Re-enable interrupts */
2219 DP(NETIF_MSG_HW,
2220 "Update index to %d\n", fp->fp_hc_idx);
2221 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2222 le16_to_cpu(fp->fp_hc_idx),
2223 IGU_INT_ENABLE, 1);
2224 break;
2225 }
2226 }
2227 }
2228
2229 return work_done;
2230}
2231
2232/* We split the first BD into a headers BD and a data BD to ease the
2233 * pain of our fellow microcode engineers; we use one mapping for both
2234 * BDs.
2235 * So far this has only been observed to happen
2236 * in Other Operating Systems(TM)
2237 */
2238static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2239 struct bnx2x_fp_txdata *txdata,
2240 struct sw_tx_bd *tx_buf,
2241 struct eth_tx_start_bd **tx_bd, u16 hlen,
2242 u16 bd_prod, int nbd)
2243{
2244 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2245 struct eth_tx_bd *d_tx_bd;
2246 dma_addr_t mapping;
2247 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2248
2249 /* first fix first BD */
2250 h_tx_bd->nbd = cpu_to_le16(nbd);
2251 h_tx_bd->nbytes = cpu_to_le16(hlen);
2252
2253 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2254 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2255 h_tx_bd->addr_lo, h_tx_bd->nbd);
2256
2257 /* now get a new data BD
2258 * (after the pbd) and fill it */
2259 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2260 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2261
2262 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2263 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2264
2265 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2266 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2267 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2268
2269 /* this marks the BD as one that has no individual mapping */
2270 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2271
2272 DP(NETIF_MSG_TX_QUEUED,
2273 "TSO split data size is %d (%x:%x)\n",
2274 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2275
2276 /* update tx_bd */
2277 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2278
2279 return bd_prod;
2280}
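The split above reuses a single DMA mapping: the header BD keeps the original address with nbytes trimmed to hlen, while the new data BD points hlen bytes further into the same mapping. A standalone sketch of that address arithmetic; all values are invented.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t addr_hi = 0x00000001, addr_lo = 0x80000000;
	uint16_t nbytes = 1400, hlen = 66;	/* hypothetical TSO header len */
	uint64_t mapping = ((uint64_t)addr_hi << 32) | addr_lo; /* HILO_U64 */

	printf("header BD: addr %#llx len %u\n",
	       (unsigned long long)mapping, hlen);
	printf("data BD:   addr %#llx len %u\n",
	       (unsigned long long)(mapping + hlen), nbytes - hlen);
	return 0;
}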
2281
2282static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2283{
2284 if (fix > 0)
2285 csum = (u16) ~csum_fold(csum_sub(csum,
2286 csum_partial(t_header - fix, fix, 0)));
2287
2288 else if (fix < 0)
2289 csum = (u16) ~csum_fold(csum_add(csum,
2290 csum_partial(t_header, -fix, 0)));
2291
2292 return swab16(csum);
2293}
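bnx2x_csum_fix() above compensates for bytes that the stack's partial checksum covered but the HW checksum start must not (or vice versa), using one's-complement arithmetic. A self-contained sketch with local stand-ins for the kernel's csum_partial()/csum_add()/csum_fold(); all data is made up.

#include <stdio.h>
#include <stdint.h>

/* end-around-carry add of two 16-bit one's-complement sums */
static uint16_t oc_add(uint16_t a, uint16_t b)
{
	uint32_t s = (uint32_t)a + b;
	return (uint16_t)((s & 0xffff) + (s >> 16));
}

/* sum a byte buffer as 16-bit big-endian words (odd tail zero-padded) */
static uint16_t oc_sum(const uint8_t *p, int len)
{
	uint16_t sum = 0;
	for (; len > 1; p += 2, len -= 2)
		sum = oc_add(sum, (uint16_t)((p[0] << 8) | p[1]));
	if (len)
		sum = oc_add(sum, (uint16_t)(p[0] << 8));
	return sum;
}

int main(void)
{
	uint8_t extra[4] = { 0x12, 0x34, 0x56, 0x78 };
	uint8_t data[4]  = { 0xde, 0xad, 0xbe, 0xef };
	/* pretend the stack summed extra+data, but HW wants data only */
	uint16_t csum = oc_add(oc_sum(extra, 4), oc_sum(data, 4));

	/* subtracting is adding the one's complement (the "fix > 0" case) */
	uint16_t fixed = oc_add(csum, (uint16_t)~oc_sum(extra, 4));
	printf("fixed sum %#06x, expected %#06x\n", fixed, oc_sum(data, 4));
	return 0;
}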
2294
2295static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2296{
2297 u32 rc;
2298
2299 if (skb->ip_summed != CHECKSUM_PARTIAL)
2300 rc = XMIT_PLAIN;
2301
2302 else {
2303 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2304 rc = XMIT_CSUM_V6;
2305 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2306 rc |= XMIT_CSUM_TCP;
2307
2308 } else {
2309 rc = XMIT_CSUM_V4;
2310 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2311 rc |= XMIT_CSUM_TCP;
2312 }
2313 }
2314
2315 if (skb_is_gso_v6(skb))
2316 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2317 else if (skb_is_gso(skb))
2318 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2319
2320 return rc;
2321}
2322
2323#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2324/* check if the packet requires linearization (i.e. it is too fragmented);
2325 there is no need to check fragmentation if the page size > 8K (there will
2326 be no violation of the FW restrictions) */
2327static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2328 u32 xmit_type)
2329{
2330 int to_copy = 0;
2331 int hlen = 0;
2332 int first_bd_sz = 0;
2333
2334 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2335 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2336
2337 if (xmit_type & XMIT_GSO) {
2338 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2339 /* Check if LSO packet needs to be copied:
2340 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2341 int wnd_size = MAX_FETCH_BD - 3;
2342 /* Number of windows to check */
2343 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2344 int wnd_idx = 0;
2345 int frag_idx = 0;
2346 u32 wnd_sum = 0;
2347
2348 /* Headers length */
2349 hlen = (int)(skb_transport_header(skb) - skb->data) +
2350 tcp_hdrlen(skb);
2351
2352			/* Amount of data (w/o headers) on linear part of SKB */
2353 first_bd_sz = skb_headlen(skb) - hlen;
2354
2355 wnd_sum = first_bd_sz;
2356
2357 /* Calculate the first sum - it's special */
2358 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2359 wnd_sum +=
2360 skb_shinfo(skb)->frags[frag_idx].size;
2361
2362 /* If there was data on linear skb data - check it */
2363 if (first_bd_sz > 0) {
2364 if (unlikely(wnd_sum < lso_mss)) {
2365 to_copy = 1;
2366 goto exit_lbl;
2367 }
2368
2369 wnd_sum -= first_bd_sz;
2370 }
2371
2372 /* Others are easier: run through the frag list and
2373 check all windows */
2374 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2375 wnd_sum +=
2376 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2377
2378 if (unlikely(wnd_sum < lso_mss)) {
2379 to_copy = 1;
2380 break;
2381 }
2382 wnd_sum -=
2383 skb_shinfo(skb)->frags[wnd_idx].size;
2384 }
2385 } else {
2386			/* in the non-LSO case, a too fragmented packet
2387			   should always be linearized */
2388 to_copy = 1;
2389 }
2390 }
2391
2392exit_lbl:
2393 if (unlikely(to_copy))
2394 DP(NETIF_MSG_TX_QUEUED,
2395 "Linearization IS REQUIRED for %s packet. "
2396 "num_frags %d hlen %d first_bd_sz %d\n",
2397 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2398 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2399
2400 return to_copy;
2401}
2402#endif
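A standalone sketch of the window check above: if any run of wnd_size consecutive buffers carries less than one MSS of payload, the skb must be linearized. The driver keeps a rolling sum; this sketch recomputes each window for clarity, and all sizes are invented.

#include <stdio.h>

int main(void)
{
	int frags[] = { 200, 300, 100, 150, 400, 250 };	/* made-up sizes */
	int nfrags = 6, wnd_size = 4, lso_mss = 900;
	int w, i, to_copy = 0;

	for (w = 0; w + wnd_size <= nfrags; w++) {
		int wnd_sum = 0;

		for (i = w; i < w + wnd_size; i++)
			wnd_sum += frags[i];
		if (wnd_sum < lso_mss) {	/* FW window underrun */
			to_copy = 1;
			break;
		}
	}
	printf("linearize: %s\n", to_copy ? "yes" : "no");
	return 0;
}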
2403
2404static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2405 u32 xmit_type)
2406{
2407 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2408 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2409 ETH_TX_PARSE_BD_E2_LSO_MSS;
2410 if ((xmit_type & XMIT_GSO_V6) &&
2411 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2412 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2413}
2414
2415/**
2416 * bnx2x_set_pbd_gso - update PBD in GSO case.
2417 *
2418 * @skb: packet skb
2419 * @pbd: parse BD
2420 * @xmit_type: xmit flags
2421 */
2422static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2423 struct eth_tx_parse_bd_e1x *pbd,
2424 u32 xmit_type)
2425{
2426 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2427 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2428 pbd->tcp_flags = pbd_tcp_flags(skb);
2429
2430 if (xmit_type & XMIT_GSO_V4) {
2431 pbd->ip_id = swab16(ip_hdr(skb)->id);
2432 pbd->tcp_pseudo_csum =
2433 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2434 ip_hdr(skb)->daddr,
2435 0, IPPROTO_TCP, 0));
2436
2437 } else
2438 pbd->tcp_pseudo_csum =
2439 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2440 &ipv6_hdr(skb)->daddr,
2441 0, IPPROTO_TCP, 0));
2442
2443 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2444}
2445
2446/**
2447 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2448 *
2449 * @bp: driver handle
2450 * @skb: packet skb
2451 * @parsing_data: data to be updated
2452 * @xmit_type: xmit flags
2453 *
2454 * 57712 related
2455 */
2456static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2457 u32 *parsing_data, u32 xmit_type)
2458{
2459 *parsing_data |=
2460 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2461 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2462 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2463
2464 if (xmit_type & XMIT_CSUM_TCP) {
2465 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2466 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2467 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2468
2469 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2470 } else
2471 /* We support checksum offload for TCP and UDP only.
2472 * No need to pass the UDP header length - it's a constant.
2473 */
2474 return skb_transport_header(skb) +
2475 sizeof(struct udphdr) - skb->data;
2476}
2477
2478static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2479 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2480{
2481 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2482
2483 if (xmit_type & XMIT_CSUM_V4)
2484 tx_start_bd->bd_flags.as_bitfield |=
2485 ETH_TX_BD_FLAGS_IP_CSUM;
2486 else
2487 tx_start_bd->bd_flags.as_bitfield |=
2488 ETH_TX_BD_FLAGS_IPV6;
2489
2490 if (!(xmit_type & XMIT_CSUM_TCP))
2491 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2492}
2493
2494/**
2495 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2496 *
2497 * @bp: driver handle
2498 * @skb: packet skb
2499 * @pbd: parse BD to be updated
2500 * @xmit_type: xmit flags
2501 */
2502static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2503 struct eth_tx_parse_bd_e1x *pbd,
2504 u32 xmit_type)
2505{
2506 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2507
2508 /* for now NS flag is not used in Linux */
2509 pbd->global_data =
2510 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2511 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2512
2513 pbd->ip_hlen_w = (skb_transport_header(skb) -
2514 skb_network_header(skb)) >> 1;
2515
2516 hlen += pbd->ip_hlen_w;
2517
2518 /* We support checksum offload for TCP and UDP only */
2519 if (xmit_type & XMIT_CSUM_TCP)
2520 hlen += tcp_hdrlen(skb) / 2;
2521 else
2522 hlen += sizeof(struct udphdr) / 2;
2523
2524 pbd->total_hlen_w = cpu_to_le16(hlen);
2525 hlen = hlen*2;
2526
2527 if (xmit_type & XMIT_CSUM_TCP) {
2528 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2529
2530 } else {
2531 s8 fix = SKB_CS_OFF(skb); /* signed! */
2532
2533 DP(NETIF_MSG_TX_QUEUED,
2534 "hlen %d fix %d csum before fix %x\n",
2535 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2536
2537 /* HW bug: fixup the CSUM */
2538 pbd->tcp_pseudo_csum =
2539 bnx2x_csum_fix(skb_transport_header(skb),
2540 SKB_CS(skb), fix);
2541
2542 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2543 pbd->tcp_pseudo_csum);
2544 }
2545
2546 return hlen;
2547}
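All the PBD header lengths above are in 16-bit words, as the function stresses. A worked example for a plain Ethernet + IPv4 + TCP frame with no options; the lengths are textbook values, not read from a real skb.

#include <stdio.h>

int main(void)
{
	int eth_hlen = 14, ip_hlen = 20, tcp_hlen = 20;	/* bytes */
	int hlen_w = eth_hlen / 2 + ip_hlen / 2 + tcp_hlen / 2;

	/* 7 + 10 + 10 = 27 words, i.e. 54 bytes of headers */
	printf("total_hlen_w = %d words (%d bytes)\n", hlen_w, hlen_w * 2);
	return 0;
}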
2548
2549/* called with netif_tx_lock
2550 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2551 * netif_wake_queue()
2552 */
2553netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2554{
2555 struct bnx2x *bp = netdev_priv(dev);
2556
2557 struct bnx2x_fastpath *fp;
2558 struct netdev_queue *txq;
2559 struct bnx2x_fp_txdata *txdata;
2560 struct sw_tx_bd *tx_buf;
2561 struct eth_tx_start_bd *tx_start_bd, *first_bd;
2562 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2563 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2564 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2565 u32 pbd_e2_parsing_data = 0;
2566 u16 pkt_prod, bd_prod;
2567 int nbd, txq_index, fp_index, txdata_index;
2568 dma_addr_t mapping;
2569 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2570 int i;
2571 u8 hlen = 0;
2572 __le16 pkt_size = 0;
2573 struct ethhdr *eth;
2574 u8 mac_type = UNICAST_ADDRESS;
2575
2576#ifdef BNX2X_STOP_ON_ERROR
2577 if (unlikely(bp->panic))
2578 return NETDEV_TX_BUSY;
2579#endif
2580
2581 txq_index = skb_get_queue_mapping(skb);
2582 txq = netdev_get_tx_queue(dev, txq_index);
2583
2584 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2585
2586 /* decode the fastpath index and the cos index from the txq */
2587 fp_index = TXQ_TO_FP(txq_index);
2588 txdata_index = TXQ_TO_COS(txq_index);
2589
2590#ifdef BCM_CNIC
2591 /*
2592 * Override the above for the FCoE queue:
2593 * - FCoE fp entry is right after the ETH entries.
2594 * - FCoE L2 queue uses bp->txdata[0] only.
2595 */
2596 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2597 bnx2x_fcoe_tx(bp, txq_index)))) {
2598 fp_index = FCOE_IDX;
2599 txdata_index = 0;
2600 }
2601#endif
2602
2603 /* enable this debug print to view the transmission queue being used
2604 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d",
2605 txq_index, fp_index, txdata_index); */
2606
2607 /* locate the fastpath and the txdata */
2608 fp = &bp->fp[fp_index];
2609 txdata = &fp->txdata[txdata_index];
2610
2611	/* enable this debug print to view the transmission details
2612 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
2613 " tx_data ptr %p fp pointer %p",
2614 txdata->cid, fp_index, txdata_index, txdata, fp); */
2615
2616 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2617 (skb_shinfo(skb)->nr_frags + 3))) {
2618 fp->eth_q_stats.driver_xoff++;
2619 netif_tx_stop_queue(txq);
2620 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2621 return NETDEV_TX_BUSY;
2622 }
2623
2624 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2625 "protocol(%x,%x) gso type %x xmit_type %x\n",
2626 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2627 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2628
2629 eth = (struct ethhdr *)skb->data;
2630
2631 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2632 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2633 if (is_broadcast_ether_addr(eth->h_dest))
2634 mac_type = BROADCAST_ADDRESS;
2635 else
2636 mac_type = MULTICAST_ADDRESS;
2637 }
2638
2639#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2640 /* First, check if we need to linearize the skb (due to FW
2641 restrictions). No need to check fragmentation if page size > 8K
2642 (there will be no violation to FW restrictions) */
2643 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2644 /* Statistics of linearization */
2645 bp->lin_cnt++;
2646 if (skb_linearize(skb) != 0) {
2647 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2648 "silently dropping this SKB\n");
2649 dev_kfree_skb_any(skb);
2650 return NETDEV_TX_OK;
2651 }
2652 }
2653#endif
2654 /* Map skb linear data for DMA */
2655 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2656 skb_headlen(skb), DMA_TO_DEVICE);
2657 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2658 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2659 "silently dropping this SKB\n");
2660 dev_kfree_skb_any(skb);
2661 return NETDEV_TX_OK;
2662 }
2663 /*
2664 Please read carefully. First we use one BD which we mark as start,
2665 then we have a parsing info BD (used for TSO or xsum),
2666 and only then we have the rest of the TSO BDs.
2667 (don't forget to mark the last one as last,
2668 and to unmap only AFTER you write to the BD ...)
2669	And above all, all PBD sizes are in words - NOT DWORDS!
2670 */
2671
2672 /* get current pkt produced now - advance it just before sending packet
2673 * since mapping of pages may fail and cause packet to be dropped
2674 */
2675 pkt_prod = txdata->tx_pkt_prod;
2676 bd_prod = TX_BD(txdata->tx_bd_prod);
2677
2678 /* get a tx_buf and first BD
2679 * tx_start_bd may be changed during SPLIT,
2680 * but first_bd will always stay first
2681 */
2682 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2683 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2684 first_bd = tx_start_bd;
2685
2686 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2687 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2688 mac_type);
2689
2690 /* header nbd */
2691 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2692
2693 /* remember the first BD of the packet */
2694 tx_buf->first_bd = txdata->tx_bd_prod;
2695 tx_buf->skb = skb;
2696 tx_buf->flags = 0;
2697
2698 DP(NETIF_MSG_TX_QUEUED,
2699 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2700 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2701
2702 if (vlan_tx_tag_present(skb)) {
2703 tx_start_bd->vlan_or_ethertype =
2704 cpu_to_le16(vlan_tx_tag_get(skb));
2705 tx_start_bd->bd_flags.as_bitfield |=
2706 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2707 } else
2708 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2709
2710 /* turn on parsing and get a BD */
2711 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2712
2713 if (xmit_type & XMIT_CSUM)
2714 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2715
2716 if (!CHIP_IS_E1x(bp)) {
2717 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2718 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2719 /* Set PBD in checksum offload case */
2720 if (xmit_type & XMIT_CSUM)
2721 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2722 &pbd_e2_parsing_data,
2723 xmit_type);
2724 if (IS_MF_SI(bp)) {
2725 /*
2726 * fill in the MAC addresses in the PBD - for local
2727 * switching
2728 */
2729 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2730 &pbd_e2->src_mac_addr_mid,
2731 &pbd_e2->src_mac_addr_lo,
2732 eth->h_source);
2733 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2734 &pbd_e2->dst_mac_addr_mid,
2735 &pbd_e2->dst_mac_addr_lo,
2736 eth->h_dest);
2737 }
2738 } else {
2739 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
2740 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2741 /* Set PBD in checksum offload case */
2742 if (xmit_type & XMIT_CSUM)
2743 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2744
2745 }
2746
2747 /* Setup the data pointer of the first BD of the packet */
2748 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2749 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2750 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
2751 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2752 pkt_size = tx_start_bd->nbytes;
2753
2754 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2755 " nbytes %d flags %x vlan %x\n",
2756 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2757 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2758 tx_start_bd->bd_flags.as_bitfield,
2759 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2760
2761 if (xmit_type & XMIT_GSO) {
2762
2763 DP(NETIF_MSG_TX_QUEUED,
2764 "TSO packet len %d hlen %d total len %d tso size %d\n",
2765 skb->len, hlen, skb_headlen(skb),
2766 skb_shinfo(skb)->gso_size);
2767
2768 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2769
2770 if (unlikely(skb_headlen(skb) > hlen))
2771 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2772 &tx_start_bd, hlen,
2773 bd_prod, ++nbd);
2774 if (!CHIP_IS_E1x(bp))
2775 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2776 xmit_type);
2777 else
2778 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2779 }
2780
2781 /* Set the PBD's parsing_data field if not zero
2782 * (for the chips newer than 57711).
2783 */
2784 if (pbd_e2_parsing_data)
2785 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2786
2787 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2788
2789 /* Handle fragmented skb */
2790 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2791 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2792
2793 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2794 frag->page_offset, frag->size,
2795 DMA_TO_DEVICE);
2796 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2797
2798 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2799 "dropping packet...\n");
2800
2801			/* we need to unmap all buffers already mapped
2802			 * for this SKB;
2803			 * first_bd->nbd needs to be properly updated
2804			 * before the call to bnx2x_free_tx_pkt
2805 */
2806 first_bd->nbd = cpu_to_le16(nbd);
2807 bnx2x_free_tx_pkt(bp, txdata,
2808 TX_BD(txdata->tx_pkt_prod));
2809 return NETDEV_TX_OK;
2810 }
2811
2812 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2813 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2814 if (total_pkt_bd == NULL)
2815 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2816
2817 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2818 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2819 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2820 le16_add_cpu(&pkt_size, frag->size);
2821 nbd++;
2822
2823 DP(NETIF_MSG_TX_QUEUED,
2824 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2825 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2826 le16_to_cpu(tx_data_bd->nbytes));
2827 }
2828
2829 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2830
2831 /* update with actual num BDs */
2832 first_bd->nbd = cpu_to_le16(nbd);
2833
2834 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2835
2836 /* now send a tx doorbell, counting the next BD
2837 * if the packet contains or ends with it
2838 */
2839 if (TX_BD_POFF(bd_prod) < nbd)
2840 nbd++;
2841
2842 /* total_pkt_bytes should be set on the first data BD if
2843 * it's not an LSO packet and there is more than one
2844 * data BD. In this case pkt_size is limited by an MTU value.
2845	 * However we prefer to set it for an LSO packet as well (although we
2846	 * don't have to) in order to save some CPU cycles in the non-LSO
2847	 * case, where we care much more about them.
2848 */
2849 if (total_pkt_bd != NULL)
2850 total_pkt_bd->total_pkt_bytes = pkt_size;
2851
2852 if (pbd_e1x)
2853 DP(NETIF_MSG_TX_QUEUED,
2854 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2855 " tcp_flags %x xsum %x seq %u hlen %u\n",
2856 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2857 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2858 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2859 le16_to_cpu(pbd_e1x->total_hlen_w));
2860 if (pbd_e2)
2861 DP(NETIF_MSG_TX_QUEUED,
2862 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2863 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2864 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2865 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2866 pbd_e2->parsing_data);
2867 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2868
2869 txdata->tx_pkt_prod++;
2870 /*
2871 * Make sure that the BD data is updated before updating the producer
2872 * since FW might read the BD right after the producer is updated.
2873 * This is only applicable for weak-ordered memory model archs such
2874	 * as IA-64. The following barrier is also mandatory since the FW
2875	 * assumes packets must have BDs.
2876 */
2877 wmb();
2878
2879 txdata->tx_db.data.prod += nbd;
2880 barrier();
2881
2882 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
2883
2884 mmiowb();
2885
2886 txdata->tx_bd_prod += nbd;
2887
2888 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
2889 netif_tx_stop_queue(txq);
2890
2891 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2892 * ordering of set_bit() in netif_tx_stop_queue() and read of
2893 * fp->bd_tx_cons */
2894 smp_mb();
2895
2896 fp->eth_q_stats.driver_xoff++;
2897 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
2898 netif_tx_wake_queue(txq);
2899 }
2900 txdata->tx_pkt++;
2901
2902 return NETDEV_TX_OK;
2903}
2904
2905/**
2906 * bnx2x_setup_tc - routine to configure net_device for multi tc
2907 *
2908 * @dev: net device to configure
2909 * @num_tc: number of traffic classes to enable
2910 *
2911 * callback connected to the ndo_setup_tc function pointer
2912 */
2913int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2914{
2915 int cos, prio, count, offset;
2916 struct bnx2x *bp = netdev_priv(dev);
2917
2918 /* setup tc must be called under rtnl lock */
2919 ASSERT_RTNL();
2920
2921 /* no traffic classes requested. aborting */
2922 if (!num_tc) {
2923 netdev_reset_tc(dev);
2924 return 0;
2925 }
2926
2927 /* requested to support too many traffic classes */
2928 if (num_tc > bp->max_cos) {
2929 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
2930 " requested: %d. max supported is %d",
2931 num_tc, bp->max_cos);
2932 return -EINVAL;
2933 }
2934
2935 /* declare amount of supported traffic classes */
2936 if (netdev_set_num_tc(dev, num_tc)) {
2937 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes",
2938 num_tc);
2939 return -EINVAL;
2940 }
2941
2942 /* configure priority to traffic class mapping */
2943 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2944 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
2945 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d",
2946 prio, bp->prio_to_cos[prio]);
2947 }
2948
2949
2950	/* Use this configuration to differentiate tc0 from other COSes.
2951	   This can be used for ETS or PFC, and saves the effort of setting
2952	   up a multi-class queue discipline or negotiating DCBX with a switch:
2953 netdev_set_prio_tc_map(dev, 0, 0);
2954 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0);
2955 for (prio = 1; prio < 16; prio++) {
2956 netdev_set_prio_tc_map(dev, prio, 1);
2957 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1);
2958 } */
2959
2960 /* configure traffic class to transmission queue mapping */
2961 for (cos = 0; cos < bp->max_cos; cos++) {
2962 count = BNX2X_NUM_ETH_QUEUES(bp);
2963 offset = cos * MAX_TXQS_PER_COS;
2964 netdev_set_tc_queue(dev, cos, count, offset);
2965 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d",
2966 cos, offset, count);
2967 }
2968
2969 return 0;
2970}
2971
2972/* called with rtnl_lock */
2973int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2974{
2975 struct sockaddr *addr = p;
2976 struct bnx2x *bp = netdev_priv(dev);
2977 int rc = 0;
2978
2979 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2980 return -EINVAL;
2981
2982 if (netif_running(dev)) {
2983 rc = bnx2x_set_eth_mac(bp, false);
2984 if (rc)
2985 return rc;
2986 }
2987
2988 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2989
2990 if (netif_running(dev))
2991 rc = bnx2x_set_eth_mac(bp, true);
2992
2993 return rc;
2994}
2995
2996static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2997{
2998 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2999 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3000 u8 cos;
3001
3002 /* Common */
3003#ifdef BCM_CNIC
3004 if (IS_FCOE_IDX(fp_index)) {
3005 memset(sb, 0, sizeof(union host_hc_status_block));
3006 fp->status_blk_mapping = 0;
3007
3008 } else {
3009#endif
3010 /* status blocks */
3011 if (!CHIP_IS_E1x(bp))
3012 BNX2X_PCI_FREE(sb->e2_sb,
3013 bnx2x_fp(bp, fp_index,
3014 status_blk_mapping),
3015 sizeof(struct host_hc_status_block_e2));
3016 else
3017 BNX2X_PCI_FREE(sb->e1x_sb,
3018 bnx2x_fp(bp, fp_index,
3019 status_blk_mapping),
3020 sizeof(struct host_hc_status_block_e1x));
3021#ifdef BCM_CNIC
3022 }
3023#endif
3024 /* Rx */
3025 if (!skip_rx_queue(bp, fp_index)) {
3026 bnx2x_free_rx_bds(fp);
3027
3028 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3029 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3030 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3031 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3032 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3033
3034 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3035 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3036 sizeof(struct eth_fast_path_rx_cqe) *
3037 NUM_RCQ_BD);
3038
3039 /* SGE ring */
3040 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3041 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3042 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3043 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3044 }
3045
3046 /* Tx */
3047 if (!skip_tx_queue(bp, fp_index)) {
3048 /* fastpath tx rings: tx_buf tx_desc */
3049 for_each_cos_in_tx_queue(fp, cos) {
3050 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3051
3052 DP(BNX2X_MSG_SP,
3053 "freeing tx memory of fp %d cos %d cid %d",
3054 fp_index, cos, txdata->cid);
3055
3056 BNX2X_FREE(txdata->tx_buf_ring);
3057 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3058 txdata->tx_desc_mapping,
3059 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3060 }
3061 }
3062 /* end of fastpath */
3063}
3064
3065void bnx2x_free_fp_mem(struct bnx2x *bp)
3066{
3067 int i;
3068 for_each_queue(bp, i)
3069 bnx2x_free_fp_mem_at(bp, i);
3070}
3071
3072static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3073{
3074 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3075 if (!CHIP_IS_E1x(bp)) {
3076 bnx2x_fp(bp, index, sb_index_values) =
3077 (__le16 *)status_blk.e2_sb->sb.index_values;
3078 bnx2x_fp(bp, index, sb_running_index) =
3079 (__le16 *)status_blk.e2_sb->sb.running_index;
3080 } else {
3081 bnx2x_fp(bp, index, sb_index_values) =
3082 (__le16 *)status_blk.e1x_sb->sb.index_values;
3083 bnx2x_fp(bp, index, sb_running_index) =
3084 (__le16 *)status_blk.e1x_sb->sb.running_index;
3085 }
3086}
3087
3088static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3089{
3090 union host_hc_status_block *sb;
3091 struct bnx2x_fastpath *fp = &bp->fp[index];
3092 int ring_size = 0;
3093 u8 cos;
3094 int rx_ring_size = 0;
3095
3096	/* if rx_ring_size was specified - use it; otherwise compute a default */
3097 if (!bp->rx_ring_size) {
3098
3099 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3100
3101 /* allocate at least number of buffers required by FW */
3102 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3103 MIN_RX_SIZE_TPA, rx_ring_size);
3104
3105 bp->rx_ring_size = rx_ring_size;
3106 } else
3107 rx_ring_size = bp->rx_ring_size;
3108
3109 /* Common */
3110 sb = &bnx2x_fp(bp, index, status_blk);
3111#ifdef BCM_CNIC
3112 if (!IS_FCOE_IDX(index)) {
3113#endif
3114 /* status blocks */
3115 if (!CHIP_IS_E1x(bp))
3116 BNX2X_PCI_ALLOC(sb->e2_sb,
3117 &bnx2x_fp(bp, index, status_blk_mapping),
3118 sizeof(struct host_hc_status_block_e2));
3119 else
3120 BNX2X_PCI_ALLOC(sb->e1x_sb,
3121 &bnx2x_fp(bp, index, status_blk_mapping),
3122 sizeof(struct host_hc_status_block_e1x));
3123#ifdef BCM_CNIC
3124 }
3125#endif
3126
3127 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3128 * set shortcuts for it.
3129 */
3130 if (!IS_FCOE_IDX(index))
3131 set_sb_shortcuts(bp, index);
3132
3133 /* Tx */
3134 if (!skip_tx_queue(bp, index)) {
3135 /* fastpath tx rings: tx_buf tx_desc */
3136 for_each_cos_in_tx_queue(fp, cos) {
3137 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3138
3139 DP(BNX2X_MSG_SP, "allocating tx memory of "
3140 "fp %d cos %d",
3141 index, cos);
3142
3143 BNX2X_ALLOC(txdata->tx_buf_ring,
3144 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3145 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3146 &txdata->tx_desc_mapping,
3147 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3148 }
3149 }
3150
3151 /* Rx */
3152 if (!skip_rx_queue(bp, index)) {
3153 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3154 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3155 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3156 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3157 &bnx2x_fp(bp, index, rx_desc_mapping),
3158 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3159
3160 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3161 &bnx2x_fp(bp, index, rx_comp_mapping),
3162 sizeof(struct eth_fast_path_rx_cqe) *
3163 NUM_RCQ_BD);
3164
3165 /* SGE ring */
3166 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3167 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3168 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3169 &bnx2x_fp(bp, index, rx_sge_mapping),
3170 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3171 /* RX BD ring */
3172 bnx2x_set_next_page_rx_bd(fp);
3173
3174 /* CQ ring */
3175 bnx2x_set_next_page_rx_cq(fp);
3176
3177 /* BDs */
3178 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3179 if (ring_size < rx_ring_size)
3180 goto alloc_mem_err;
3181 }
3182
3183 return 0;
3184
3185/* handles low memory cases */
3186alloc_mem_err:
3187 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3188 index, ring_size);
3189	/* The FW will drop all packets if the queue is not big enough.
3190	 * In these cases we disable the queue.
3191	 * The minimum size is different for OOO, TPA and non-TPA queues.
3192 */
3193 if (ring_size < (fp->disable_tpa ?
3194 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3195 /* release memory allocated for this queue */
3196 bnx2x_free_fp_mem_at(bp, index);
3197 return -ENOMEM;
3198 }
3199 return 0;
3200}
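A sketch of the default Rx ring sizing above: divide the global buffer budget across the Rx queues, then clamp to the FW minimum. The numbers are illustrative, not the driver's actual MAX_RX_AVAIL/MIN_RX_SIZE_* values.

#include <stdio.h>

int main(void)
{
	int max_rx_avail = 4096, num_rx_queues = 40, min_rx_size = 128;
	int rx_ring_size = max_rx_avail / num_rx_queues;	/* 102 */

	if (rx_ring_size < min_rx_size)		/* FW needs a minimum */
		rx_ring_size = min_rx_size;
	printf("rx_ring_size = %d\n", rx_ring_size);
	return 0;
}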
3201
3202int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3203{
3204 int i;
3205
3206 /**
3207 * 1. Allocate FP for leading - fatal if error
3208 * 2. {CNIC} Allocate FCoE FP - fatal if error
3209 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3210 * 4. Allocate RSS - fix number of queues if error
3211 */
3212
3213 /* leading */
3214 if (bnx2x_alloc_fp_mem_at(bp, 0))
3215 return -ENOMEM;
3216
3217#ifdef BCM_CNIC
3218 if (!NO_FCOE(bp))
3219 /* FCoE */
3220 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3221			/* we will fail the load process instead of
3222			 * marking NO_FCOE_FLAG
3223 */
3224 return -ENOMEM;
3225#endif
3226
3227 /* RSS */
3228 for_each_nondefault_eth_queue(bp, i)
3229 if (bnx2x_alloc_fp_mem_at(bp, i))
3230 break;
3231
3232 /* handle memory failures */
3233 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3234 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3235
3236 WARN_ON(delta < 0);
3237#ifdef BCM_CNIC
3238 /**
3239		 * move the non-ETH FPs next to the last ETH FP;
3240		 * this must be done in that order:
3241		 * FCOE_IDX < FWD_IDX < OOO_IDX
3242 */
3243
3244		/* move the FCoE fp even if NO_FCOE_FLAG is on */
3245 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3246#endif
3247 bp->num_queues -= delta;
3248 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3249 bp->num_queues + delta, bp->num_queues);
3250 }
3251
3252 return 0;
3253}
3254
3255void bnx2x_free_mem_bp(struct bnx2x *bp)
3256{
3257 kfree(bp->fp);
3258 kfree(bp->msix_table);
3259 kfree(bp->ilt);
3260}
3261
3262int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3263{
3264 struct bnx2x_fastpath *fp;
3265 struct msix_entry *tbl;
3266 struct bnx2x_ilt *ilt;
3267 int msix_table_size = 0;
3268
3269 /*
3270	 * The biggest MSI-X table we might need is the maximum number of fast
3271	 * path IGU SBs plus the default SB (for the PF).
3272 */
3273 msix_table_size = bp->igu_sb_cnt + 1;
3274
3275 /* fp array: RSS plus CNIC related L2 queues */
3276 fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
3277 sizeof(*fp), GFP_KERNEL);
3278 if (!fp)
3279 goto alloc_err;
3280 bp->fp = fp;
3281
3282 /* msix table */
3283 tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
3284 if (!tbl)
3285 goto alloc_err;
3286 bp->msix_table = tbl;
3287
3288 /* ilt */
3289 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3290 if (!ilt)
3291 goto alloc_err;
3292 bp->ilt = ilt;
3293
3294 return 0;
3295alloc_err:
3296 bnx2x_free_mem_bp(bp);
3297 return -ENOMEM;
3298
3299}
3300
3301int bnx2x_reload_if_running(struct net_device *dev)
3302{
3303 struct bnx2x *bp = netdev_priv(dev);
3304
3305 if (unlikely(!netif_running(dev)))
3306 return 0;
3307
3308 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3309 return bnx2x_nic_load(bp, LOAD_NORMAL);
3310}
3311
3312int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3313{
3314 u32 sel_phy_idx = 0;
3315 if (bp->link_params.num_phys <= 1)
3316 return INT_PHY;
3317
3318 if (bp->link_vars.link_up) {
3319 sel_phy_idx = EXT_PHY1;
3320 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3321 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3322 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3323 sel_phy_idx = EXT_PHY2;
3324 } else {
3325
3326 switch (bnx2x_phy_selection(&bp->link_params)) {
3327 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3328 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3329 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3330 sel_phy_idx = EXT_PHY1;
3331 break;
3332 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3333 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3334 sel_phy_idx = EXT_PHY2;
3335 break;
3336 }
3337 }
3338
3339 return sel_phy_idx;
3340
3341}
3342int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3343{
3344 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3345 /*
3346	 * The selected active PHY index is always given after swapping (in
3347	 * case PHY swapping is enabled). So when swapping is enabled, we need
3348	 * to reverse the configuration.
3349 */
3350
3351 if (bp->link_params.multi_phy_config &
3352 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3353 if (sel_phy_idx == EXT_PHY1)
3354 sel_phy_idx = EXT_PHY2;
3355 else if (sel_phy_idx == EXT_PHY2)
3356 sel_phy_idx = EXT_PHY1;
3357 }
3358 return LINK_CONFIG_IDX(sel_phy_idx);
3359}
3360
3361#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3362int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3363{
3364 struct bnx2x *bp = netdev_priv(dev);
3365 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3366
3367 switch (type) {
3368 case NETDEV_FCOE_WWNN:
3369 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3370 cp->fcoe_wwn_node_name_lo);
3371 break;
3372 case NETDEV_FCOE_WWPN:
3373 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3374 cp->fcoe_wwn_port_name_lo);
3375 break;
3376 default:
3377 return -EINVAL;
3378 }
3379
3380 return 0;
3381}
3382#endif
3383
3384/* called with rtnl_lock */
3385int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3386{
3387 struct bnx2x *bp = netdev_priv(dev);
3388
3389 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3390 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3391 return -EAGAIN;
3392 }
3393
3394 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3395 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3396 return -EINVAL;
3397
3398 /* This does not race with packet allocation
3399 * because the actual alloc size is
3400 * only updated as part of load
3401 */
3402 dev->mtu = new_mtu;
3403
3404 return bnx2x_reload_if_running(dev);
3405}
3406
3407u32 bnx2x_fix_features(struct net_device *dev, u32 features)
3408{
3409 struct bnx2x *bp = netdev_priv(dev);
3410
3411 /* TPA requires Rx CSUM offloading */
3412 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3413 features &= ~NETIF_F_LRO;
3414
3415 return features;
3416}
3417
3418int bnx2x_set_features(struct net_device *dev, u32 features)
3419{
3420 struct bnx2x *bp = netdev_priv(dev);
3421 u32 flags = bp->flags;
3422 bool bnx2x_reload = false;
3423
3424 if (features & NETIF_F_LRO)
3425 flags |= TPA_ENABLE_FLAG;
3426 else
3427 flags &= ~TPA_ENABLE_FLAG;
3428
3429 if (features & NETIF_F_LOOPBACK) {
3430 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3431 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3432 bnx2x_reload = true;
3433 }
3434 } else {
3435 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3436 bp->link_params.loopback_mode = LOOPBACK_NONE;
3437 bnx2x_reload = true;
3438 }
3439 }
3440
3441 if (flags ^ bp->flags) {
3442 bp->flags = flags;
3443 bnx2x_reload = true;
3444 }
3445
3446 if (bnx2x_reload) {
3447 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3448 return bnx2x_reload_if_running(dev);
3449 /* else: bnx2x_nic_load() will be called at end of recovery */
3450 }
3451
3452 return 0;
3453}
3454
3455void bnx2x_tx_timeout(struct net_device *dev)
3456{
3457 struct bnx2x *bp = netdev_priv(dev);
3458
3459#ifdef BNX2X_STOP_ON_ERROR
3460 if (!bp->panic)
3461 bnx2x_panic();
3462#endif
3463
3464 smp_mb__before_clear_bit();
3465 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3466 smp_mb__after_clear_bit();
3467
3468 /* This allows the netif to be shutdown gracefully before resetting */
3469 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3470}
3471
3472int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3473{
3474 struct net_device *dev = pci_get_drvdata(pdev);
3475 struct bnx2x *bp;
3476
3477 if (!dev) {
3478 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3479 return -ENODEV;
3480 }
3481 bp = netdev_priv(dev);
3482
3483 rtnl_lock();
3484
3485 pci_save_state(pdev);
3486
3487 if (!netif_running(dev)) {
3488 rtnl_unlock();
3489 return 0;
3490 }
3491
3492 netif_device_detach(dev);
3493
3494 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3495
3496 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3497
3498 rtnl_unlock();
3499
3500 return 0;
3501}
3502
3503int bnx2x_resume(struct pci_dev *pdev)
3504{
3505 struct net_device *dev = pci_get_drvdata(pdev);
3506 struct bnx2x *bp;
3507 int rc;
3508
3509 if (!dev) {
3510 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3511 return -ENODEV;
3512 }
3513 bp = netdev_priv(dev);
3514
3515 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3516 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3517 return -EAGAIN;
3518 }
3519
3520 rtnl_lock();
3521
3522 pci_restore_state(pdev);
3523
3524 if (!netif_running(dev)) {
3525 rtnl_unlock();
3526 return 0;
3527 }
3528
3529 bnx2x_set_power_state(bp, PCI_D0);
3530 netif_device_attach(dev);
3531
3532 /* Since the chip was reset, clear the FW sequence number */
3533 bp->fw_seq = 0;
3534 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3535
3536 rtnl_unlock();
3537
3538 return rc;
3539}
3540
3541
3542void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3543 u32 cid)
3544{
3545 /* ustorm cxt validation */
3546 cxt->ustorm_ag_context.cdu_usage =
3547 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3548 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3549 /* xcontext validation */
3550 cxt->xstorm_ag_context.cdu_reserved =
3551 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3552 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3553}
3554
3555static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3556 u8 fw_sb_id, u8 sb_index,
3557 u8 ticks)
3558{
3559
3560 u32 addr = BAR_CSTRORM_INTMEM +
3561 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3562 REG_WR8(bp, addr, ticks);
3563 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3564 port, fw_sb_id, sb_index, ticks);
3565}
3566
3567static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3568 u16 fw_sb_id, u8 sb_index,
3569 u8 disable)
3570{
3571 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3572 u32 addr = BAR_CSTRORM_INTMEM +
3573 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3574 u16 flags = REG_RD16(bp, addr);
3575 /* clear and set */
3576 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3577 flags |= enable_flag;
3578 REG_WR16(bp, addr, flags);
3579 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3580 port, fw_sb_id, sb_index, disable);
3581}
3582
3583void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3584 u8 sb_index, u8 disable, u16 usec)
3585{
3586 int port = BP_PORT(bp);
3587 u8 ticks = usec / BNX2X_BTR;
3588
3589 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3590
3591 disable = disable ? 1 : (usec ? 0 : 1);
3592 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3593}
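
The usec-to-ticks conversion and the disable override in bnx2x_update_coalesce_sb_index() are the parts worth double-checking when tuning coalescing. The sketch below reproduces just that arithmetic in userspace (the BNX2X_BTR value of 4 is taken from this driver family's bnx2x.h; treat it as an assumption here):

#include <stdint.h>
#include <stdio.h>

#define BNX2X_BTR 4	/* assumed tick granularity (usec), as in bnx2x.h */

/* mirrors the logic above: an explicit disable or a zero timeout turns
 * host coalescing off for the status-block index
 */
static void coalesce(uint16_t usec, uint8_t disable)
{
	uint8_t ticks = usec / BNX2X_BTR;

	disable = disable ? 1 : (usec ? 0 : 1);
	printf("usec=%u -> ticks=%u, hc %s\n",
	       usec, ticks, disable ? "disabled" : "enabled");
}

int main(void)
{
	coalesce(100, 0);	/* ticks=25, enabled */
	coalesce(0, 0);		/* zero timeout -> disabled */
	coalesce(100, 1);	/* explicit disable wins */
	return 0;
}
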
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
new file mode 100644
index 00000000000..2dc1199239d
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -0,0 +1,1491 @@
1/* bnx2x_cmn.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17#ifndef BNX2X_CMN_H
18#define BNX2X_CMN_H
19
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/netdevice.h>
23
24
25#include "bnx2x.h"
26
27/* This is used as a replacement for an MCP if it's not present */
28extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
29
30extern int num_queues;
31
32/************************ Macros ********************************/
33#define BNX2X_PCI_FREE(x, y, size) \
34 do { \
35 if (x) { \
36 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
37 x = NULL; \
38 y = 0; \
39 } \
40 } while (0)
41
42#define BNX2X_FREE(x) \
43 do { \
44 if (x) { \
45 kfree((void *)x); \
46 x = NULL; \
47 } \
48 } while (0)
49
50#define BNX2X_PCI_ALLOC(x, y, size) \
51 do { \
52 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
53 if (x == NULL) \
54 goto alloc_mem_err; \
55 memset((void *)x, 0, size); \
56 } while (0)
57
58#define BNX2X_ALLOC(x, size) \
59 do { \
60 x = kzalloc(size, GFP_KERNEL); \
61 if (x == NULL) \
62 goto alloc_mem_err; \
63 } while (0)
64
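These macros assume the enclosing function provides an alloc_mem_err label for unwinding, and both free macros tolerate NULL so the unwind path can free unconditionally. A hedged sketch of the intended pairing (demo_alloc is illustrative, not a driver function; the bp fields are borrowed only to show the shape of the pattern):

/* illustrative only: how the alloc/free macros are meant to pair up */
static int demo_alloc(struct bnx2x *bp)
{
	BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * 8);
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
	return 0;

alloc_mem_err:
	/* safe even if only some of the allocations succeeded */
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
	BNX2X_FREE(bp->fp);
	return -ENOMEM;
}
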
65/*********************** Interfaces ****************************
66 * Functions that need to be implemented by each driver version
67 */
68/* Init */
69
70/**
71 * bnx2x_send_unload_req - request unload mode from the MCP.
72 *
73 * @bp: driver handle
74 * @unload_mode: requested function's unload mode
75 *
76 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
77 */
78u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
79
80/**
81 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
82 *
83 * @bp: driver handle
84 */
85void bnx2x_send_unload_done(struct bnx2x *bp);
86
87/**
88 * bnx2x_config_rss_pf - configure RSS parameters.
89 *
90 * @bp: driver handle
91 * @ind_table: indirection table to configure
 92 * @config_hash: re-configure the RSS hash keys
93 */
94int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);
95
96/**
97 * bnx2x__init_func_obj - init function object
98 *
99 * @bp: driver handle
100 *
101 * Initializes the Function Object with the appropriate
102 * parameters which include a function slow path driver
103 * interface.
104 */
105void bnx2x__init_func_obj(struct bnx2x *bp);
106
107/**
108 * bnx2x_setup_queue - setup eth queue.
109 *
110 * @bp: driver handle
111 * @fp: pointer to the fastpath structure
112 * @leading: boolean
113 *
114 */
115int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
116 bool leading);
117
118/**
119 * bnx2x_setup_leading - bring up a leading eth queue.
120 *
121 * @bp: driver handle
122 */
123int bnx2x_setup_leading(struct bnx2x *bp);
124
125/**
126 * bnx2x_fw_command - send the MCP a request
127 *
128 * @bp: driver handle
129 * @command: request
130 * @param: request's parameter
131 *
132 * block until there is a reply
133 */
134u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
135
136/**
137 * bnx2x_initial_phy_init - initialize link parameters structure variables.
138 *
139 * @bp: driver handle
140 * @load_mode: current mode
141 */
142u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
143
144/**
145 * bnx2x_link_set - configure hw according to link parameters structure.
146 *
147 * @bp: driver handle
148 */
149void bnx2x_link_set(struct bnx2x *bp);
150
151/**
152 * bnx2x_link_test - query link status.
153 *
154 * @bp: driver handle
155 * @is_serdes: bool
156 *
157 * Returns 0 if link is UP.
158 */
159u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
160
161/**
162 * bnx2x_drv_pulse - write driver pulse to shmem
163 *
164 * @bp: driver handle
165 *
166 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
167 * in the shmem.
168 */
169void bnx2x_drv_pulse(struct bnx2x *bp);
170
171/**
172 * bnx2x_igu_ack_sb - update IGU with current SB value
173 *
174 * @bp: driver handle
175 * @igu_sb_id: SB id
176 * @segment: SB segment
177 * @index: SB index
178 * @op: SB operation
179 * @update: is HW update required
180 */
181void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
182 u16 index, u8 op, u8 update);
183
184/* Disable transactions from chip to host */
185void bnx2x_pf_disable(struct bnx2x *bp);
186
187/**
188 * bnx2x__link_status_update - handles link status change.
189 *
190 * @bp: driver handle
191 */
192void bnx2x__link_status_update(struct bnx2x *bp);
193
194/**
195 * bnx2x_link_report - report link status to upper layer.
196 *
197 * @bp: driver handle
198 */
199void bnx2x_link_report(struct bnx2x *bp);
200
201/* Non-atomic version of bnx2x_link_report() */
202void __bnx2x_link_report(struct bnx2x *bp);
203
204/**
205 * bnx2x_get_mf_speed - calculate MF speed.
206 *
207 * @bp: driver handle
208 *
209 * Takes into account current linespeed and MF configuration.
210 */
211u16 bnx2x_get_mf_speed(struct bnx2x *bp);
212
213/**
214 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
215 *
216 * @irq: irq number
217 * @dev_instance: private instance
218 */
219irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
220
221/**
222 * bnx2x_interrupt - non MSI-X interrupt handler
223 *
224 * @irq: irq number
225 * @dev_instance: private instance
226 */
227irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
228#ifdef BCM_CNIC
229
230/**
231 * bnx2x_cnic_notify - send command to cnic driver
232 *
233 * @bp: driver handle
234 * @cmd: command
235 */
236int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
237
238/**
239 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
240 *
241 * @bp: driver handle
242 */
243void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
244#endif
245
246/**
247 * bnx2x_int_enable - enable HW interrupts.
248 *
249 * @bp: driver handle
250 */
251void bnx2x_int_enable(struct bnx2x *bp);
252
253/**
254 * bnx2x_int_disable_sync - disable interrupts.
255 *
256 * @bp: driver handle
257 * @disable_hw: true, disable HW interrupts.
258 *
 259 * This function ensures that no ISRs or SP DPCs
 260 * (sp_task) are running after it returns.
261 */
262void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
263
264/**
265 * bnx2x_nic_init - init driver internals.
266 *
267 * @bp: driver handle
268 * @load_code: COMMON, PORT or FUNCTION
269 *
270 * Initializes:
271 * - rings
272 * - status blocks
273 * - etc.
274 */
275void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
276
277/**
278 * bnx2x_alloc_mem - allocate driver's memory.
279 *
280 * @bp: driver handle
281 */
282int bnx2x_alloc_mem(struct bnx2x *bp);
283
284/**
285 * bnx2x_free_mem - release driver's memory.
286 *
287 * @bp: driver handle
288 */
289void bnx2x_free_mem(struct bnx2x *bp);
290
291/**
292 * bnx2x_set_num_queues - set number of queues according to mode.
293 *
294 * @bp: driver handle
295 */
296void bnx2x_set_num_queues(struct bnx2x *bp);
297
298/**
299 * bnx2x_chip_cleanup - cleanup chip internals.
300 *
301 * @bp: driver handle
302 * @unload_mode: COMMON, PORT, FUNCTION
303 *
304 * - Cleanup MAC configuration.
305 * - Closes clients.
306 * - etc.
307 */
308void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
309
310/**
311 * bnx2x_acquire_hw_lock - acquire HW lock.
312 *
313 * @bp: driver handle
314 * @resource: resource bit which was locked
315 */
316int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
317
318/**
319 * bnx2x_release_hw_lock - release HW lock.
320 *
321 * @bp: driver handle
322 * @resource: resource bit which was locked
323 */
324int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
325
326/**
327 * bnx2x_release_leader_lock - release recovery leader lock
328 *
329 * @bp: driver handle
330 */
331int bnx2x_release_leader_lock(struct bnx2x *bp);
332
333/**
334 * bnx2x_set_eth_mac - configure eth MAC address in the HW
335 *
336 * @bp: driver handle
337 * @set: set or clear
338 *
339 * Configures according to the value in netdev->dev_addr.
340 */
341int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
342
343/**
344 * bnx2x_set_rx_mode - set MAC filtering configurations.
345 *
346 * @dev: netdevice
347 *
348 * called with netif_tx_lock from dev_mcast.c
349 * If bp->state is OPEN, should be called with
350 * netif_addr_lock_bh()
351 */
352void bnx2x_set_rx_mode(struct net_device *dev);
353
354/**
355 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
356 *
357 * @bp: driver handle
358 *
359 * If bp->state is OPEN, should be called with
360 * netif_addr_lock_bh().
361 */
362void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
363
364/**
365 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
366 *
367 * @bp: driver handle
368 * @cl_id: client id
369 * @rx_mode_flags: rx mode configuration
370 * @rx_accept_flags: rx accept configuration
371 * @tx_accept_flags: tx accept configuration (tx switch)
372 * @ramrod_flags: ramrod configuration
373 */
374void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
375 unsigned long rx_mode_flags,
376 unsigned long rx_accept_flags,
377 unsigned long tx_accept_flags,
378 unsigned long ramrod_flags);
379
380/* Parity errors related */
381void bnx2x_inc_load_cnt(struct bnx2x *bp);
382u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
383bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
384bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
385void bnx2x_set_reset_in_progress(struct bnx2x *bp);
386void bnx2x_set_reset_global(struct bnx2x *bp);
387void bnx2x_disable_close_the_gate(struct bnx2x *bp);
388
389/**
390 * bnx2x_sp_event - handle ramrods completion.
391 *
392 * @fp: fastpath handle for the event
393 * @rr_cqe: eth_rx_cqe
394 */
395void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
396
397/**
398 * bnx2x_ilt_set_info - prepare ILT configurations.
399 *
400 * @bp: driver handle
401 */
402void bnx2x_ilt_set_info(struct bnx2x *bp);
403
404/**
405 * bnx2x_dcbx_init - initialize dcbx protocol.
406 *
407 * @bp: driver handle
408 */
409void bnx2x_dcbx_init(struct bnx2x *bp);
410
411/**
412 * bnx2x_set_power_state - set power state to the requested value.
413 *
414 * @bp: driver handle
415 * @state: required state D0 or D3hot
416 *
417 * Currently only D0 and D3hot are supported.
418 */
419int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
420
421/**
422 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
423 *
424 * @bp: driver handle
425 * @value: new value
426 */
427void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
428/* Error handling */
429void bnx2x_panic_dump(struct bnx2x *bp);
430
431void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
432
433/* dev_close main block */
434int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
435
436/* dev_open main block */
437int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
438
439/* hard_xmit callback */
440netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
441
442/* setup_tc callback */
443int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
444
445/* select_queue callback */
446u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
447
448/* reload helper */
449int bnx2x_reload_if_running(struct net_device *dev);
450
451int bnx2x_change_mac_addr(struct net_device *dev, void *p);
452
453/* NAPI poll Rx part */
454int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
455
456void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
457 u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
458
459/* NAPI poll Tx part */
460int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
461
462/* suspend/resume callbacks */
463int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
464int bnx2x_resume(struct pci_dev *pdev);
465
466/* Release IRQ vectors */
467void bnx2x_free_irq(struct bnx2x *bp);
468
469void bnx2x_free_fp_mem(struct bnx2x *bp);
470int bnx2x_alloc_fp_mem(struct bnx2x *bp);
471void bnx2x_init_rx_rings(struct bnx2x *bp);
472void bnx2x_free_skbs(struct bnx2x *bp);
473void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
474void bnx2x_netif_start(struct bnx2x *bp);
475
476/**
477 * bnx2x_enable_msix - set msix configuration.
478 *
479 * @bp: driver handle
480 *
481 * fills msix_table, requests vectors, updates num_queues
482 * according to number of available vectors.
483 */
484int bnx2x_enable_msix(struct bnx2x *bp);
485
486/**
 487 * bnx2x_enable_msi - request MSI mode from OS, update internals accordingly
488 *
489 * @bp: driver handle
490 */
491int bnx2x_enable_msi(struct bnx2x *bp);
492
493/**
494 * bnx2x_poll - NAPI callback
495 *
496 * @napi: napi structure
 497 * @budget: NAPI budget (max number of Rx packets to process in one poll)
498 *
499 */
500int bnx2x_poll(struct napi_struct *napi, int budget);
501
502/**
 503 * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure
504 *
505 * @bp: driver handle
506 */
507int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
508
509/**
 510 * bnx2x_free_mem_bp - release memory outside the main driver structure
511 *
512 * @bp: driver handle
513 */
514void bnx2x_free_mem_bp(struct bnx2x *bp);
515
516/**
517 * bnx2x_change_mtu - change mtu netdev callback
518 *
519 * @dev: net device
520 * @new_mtu: requested mtu
521 *
522 */
523int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
524
525#if defined(BCM_CNIC) && (defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE))
526/**
527 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
528 *
529 * @dev: net_device
530 * @wwn: output buffer
531 * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
532 *
533 */
534int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
535#endif
536u32 bnx2x_fix_features(struct net_device *dev, u32 features);
537int bnx2x_set_features(struct net_device *dev, u32 features);
538
539/**
540 * bnx2x_tx_timeout - tx timeout netdev callback
541 *
542 * @dev: net device
543 */
544void bnx2x_tx_timeout(struct net_device *dev);
545
546/*********************** Inlines **********************************/
547/*********************** Fast path ********************************/
548static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
549{
550 barrier(); /* status block is written to by the chip */
551 fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
552}
553
554static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
555 struct bnx2x_fastpath *fp, u16 bd_prod,
556 u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
557{
558 struct ustorm_eth_rx_producers rx_prods = {0};
559 u32 i;
560
561 /* Update producers */
562 rx_prods.bd_prod = bd_prod;
563 rx_prods.cqe_prod = rx_comp_prod;
564 rx_prods.sge_prod = rx_sge_prod;
565
566 /*
567 * Make sure that the BD and SGE data is updated before updating the
568 * producers since FW might read the BD/SGE right after the producer
569 * is updated.
570 * This is only applicable for weak-ordered memory model archs such
 571	 * as IA-64. The following barrier is also mandatory since the FW
 572	 * assumes BDs always have buffers.
 573	 */
574 wmb();
575
576 for (i = 0; i < sizeof(rx_prods)/4; i++)
577 REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);
578
579 mmiowb(); /* keep prod updates ordered */
580
581 DP(NETIF_MSG_RX_STATUS,
582 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
583 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
584}
585
586static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
587 u8 segment, u16 index, u8 op,
588 u8 update, u32 igu_addr)
589{
590 struct igu_regular cmd_data = {0};
591
592 cmd_data.sb_id_and_flags =
593 ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
594 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
595 (update << IGU_REGULAR_BUPDATE_SHIFT) |
596 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
597
598 DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
599 cmd_data.sb_id_and_flags, igu_addr);
600 REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
601
602 /* Make sure that ACK is written */
603 mmiowb();
604 barrier();
605}
606
607static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
608 u8 idu_sb_id, bool is_Pf)
609{
610 u32 data, ctl, cnt = 100;
611 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
612 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
613 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
614 u32 sb_bit = 1 << (idu_sb_id%32);
615 u32 func_encode = func |
 616		   ((is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
617 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
618
619 /* Not supported in BC mode */
620 if (CHIP_INT_MODE_IS_BC(bp))
621 return;
622
623 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
624 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
625 IGU_REGULAR_CLEANUP_SET |
626 IGU_REGULAR_BCLEANUP;
627
628 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
629 func_encode << IGU_CTRL_REG_FID_SHIFT |
630 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
631
632 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
633 data, igu_addr_data);
634 REG_WR(bp, igu_addr_data, data);
635 mmiowb();
636 barrier();
637 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
638 ctl, igu_addr_ctl);
639 REG_WR(bp, igu_addr_ctl, ctl);
640 mmiowb();
641 barrier();
642
643 /* wait for clean up to finish */
644 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
645 msleep(20);
646
647
648 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
649 DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
650 "idu_sb_id %d offset %d bit %d (cnt %d)\n",
651 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
652 }
653}
654
655static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
656 u8 storm, u16 index, u8 op, u8 update)
657{
658 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
659 COMMAND_REG_INT_ACK);
660 struct igu_ack_register igu_ack;
661
662 igu_ack.status_block_index = index;
663 igu_ack.sb_id_and_flags =
664 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
665 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
666 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
667 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
668
669 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
670 (*(u32 *)&igu_ack), hc_addr);
671 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
672
673 /* Make sure that ACK is written */
674 mmiowb();
675 barrier();
676}
677
678static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
679 u16 index, u8 op, u8 update)
680{
681 if (bp->common.int_block == INT_BLOCK_HC)
682 bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
683 else {
684 u8 segment;
685
686 if (CHIP_INT_MODE_IS_BC(bp))
687 segment = storm;
688 else if (igu_sb_id != bp->igu_dsb_id)
689 segment = IGU_SEG_ACCESS_DEF;
690 else if (storm == ATTENTION_ID)
691 segment = IGU_SEG_ACCESS_ATTN;
692 else
693 segment = IGU_SEG_ACCESS_DEF;
694 bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
695 }
696}
697
698static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
699{
700 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
701 COMMAND_REG_SIMD_MASK);
702 u32 result = REG_RD(bp, hc_addr);
703
704 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
705 result, hc_addr);
706
707 barrier();
708 return result;
709}
710
711static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
712{
713 u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
714 u32 result = REG_RD(bp, igu_addr);
715
716 DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
717 result, igu_addr);
718
719 barrier();
720 return result;
721}
722
723static inline u16 bnx2x_ack_int(struct bnx2x *bp)
724{
725 barrier();
726 if (bp->common.int_block == INT_BLOCK_HC)
727 return bnx2x_hc_ack_int(bp);
728 else
729 return bnx2x_igu_ack_int(bp);
730}
731
732static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
733{
734 /* Tell compiler that consumer and producer can change */
735 barrier();
736 return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
737}
738
739static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
740 struct bnx2x_fp_txdata *txdata)
741{
742 s16 used;
743 u16 prod;
744 u16 cons;
745
746 prod = txdata->tx_bd_prod;
747 cons = txdata->tx_bd_cons;
748
749 /* NUM_TX_RINGS = number of "next-page" entries
750 It will be used as a threshold */
751 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
752
753#ifdef BNX2X_STOP_ON_ERROR
754 WARN_ON(used < 0);
755 WARN_ON(used > bp->tx_ring_size);
756 WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
757#endif
758
759 return (s16)(bp->tx_ring_size) - used;
760}
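
The SUB_S16() step above is what keeps the accounting correct when tx_bd_prod wraps past 0xffff while tx_bd_cons has not. A standalone demonstration (sub_s16() here is an assumed equivalent of the driver's SUB_S16 macro, i.e. a subtraction truncated to signed 16 bits):

#include <stdint.h>
#include <stdio.h>

/* assumed equivalent of the driver's SUB_S16: difference as signed 16-bit */
static int16_t sub_s16(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b);
}

int main(void)
{
	/* producer wrapped past 0xffff, consumer has not caught up yet */
	uint16_t prod = 0x0005, cons = 0xfff0;

	/* naive u16 math would say prod < cons; the signed diff is 21 */
	printf("used BDs: %d\n", sub_s16(prod, cons));
	return 0;
}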
761
762static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
763{
764 u16 hw_cons;
765
766 /* Tell compiler that status block fields can change */
767 barrier();
768 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
769 return hw_cons != txdata->tx_pkt_cons;
770}
771
772static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
773{
774 u8 cos;
775 for_each_cos_in_tx_queue(fp, cos)
776 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
777 return true;
778 return false;
779}
780
781static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
782{
783 u16 rx_cons_sb;
784
785 /* Tell compiler that status block fields can change */
786 barrier();
787 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
788 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
789 rx_cons_sb++;
790 return (fp->rx_comp_cons != rx_cons_sb);
791}
792
793/**
794 * bnx2x_tx_disable - disables tx from stack point of view
795 *
796 * @bp: driver handle
797 */
798static inline void bnx2x_tx_disable(struct bnx2x *bp)
799{
800 netif_tx_disable(bp->dev);
801 netif_carrier_off(bp->dev);
802}
803
804static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
805 struct bnx2x_fastpath *fp, u16 index)
806{
807 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
808 struct page *page = sw_buf->page;
809 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
810
811 /* Skip "next page" elements */
812 if (!page)
813 return;
814
815 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
816 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
817 __free_pages(page, PAGES_PER_SGE_SHIFT);
818
819 sw_buf->page = NULL;
820 sge->addr_hi = 0;
821 sge->addr_lo = 0;
822}
823
824static inline void bnx2x_add_all_napi(struct bnx2x *bp)
825{
826 int i;
827
828 /* Add NAPI objects */
829 for_each_rx_queue(bp, i)
830 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
831 bnx2x_poll, BNX2X_NAPI_WEIGHT);
832}
833
834static inline void bnx2x_del_all_napi(struct bnx2x *bp)
835{
836 int i;
837
838 for_each_rx_queue(bp, i)
839 netif_napi_del(&bnx2x_fp(bp, i, napi));
840}
841
842static inline void bnx2x_disable_msi(struct bnx2x *bp)
843{
844 if (bp->flags & USING_MSIX_FLAG) {
845 pci_disable_msix(bp->pdev);
846 bp->flags &= ~USING_MSIX_FLAG;
847 } else if (bp->flags & USING_MSI_FLAG) {
848 pci_disable_msi(bp->pdev);
849 bp->flags &= ~USING_MSI_FLAG;
850 }
851}
852
853static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
854{
855 return num_queues ?
856 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
857 min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
858}
859
860static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
861{
862 int i, j;
863
864 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
865 int idx = RX_SGE_CNT * i - 1;
866
867 for (j = 0; j < 2; j++) {
868 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
869 idx--;
870 }
871 }
872}
873
874static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
875{
876 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
877 memset(fp->sge_mask, 0xff,
878 (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
879
 880	/* Clear the last two indices in the page: these are the
 881	   indices that correspond to the "next" element, hence they
 882	   will never be indicated and should be removed from
 883	   the calculations. */
884 bnx2x_clear_sge_mask_next_elems(fp);
885}
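
BIT_VEC64_CLEAR_BIT treats sge_mask as one flat bit vector spread across an array of u64 words. A hedged standalone equivalent of the word/bit indexing it must perform (the real macro lives in bnx2x.h and is not quoted here):

#include <stdint.h>
#include <stdio.h>

/* assumed expansion of the driver's macros: word = idx / 64, bit = idx % 64 */
static inline void bitvec64_clear(uint64_t *vec, unsigned int idx)
{
	vec[idx >> 6] &= ~(1ULL << (idx & 63));
}

static inline int bitvec64_test(const uint64_t *vec, unsigned int idx)
{
	return (vec[idx >> 6] >> (idx & 63)) & 1;
}

int main(void)
{
	uint64_t mask[2] = { ~0ULL, ~0ULL };	/* "all 1-s", as above */

	bitvec64_clear(mask, 127);		/* a "next element" index */
	printf("bit 127 is now %d\n", bitvec64_test(mask, 127));
	return 0;
}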
886
887static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
888 struct bnx2x_fastpath *fp, u16 index)
889{
890 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
891 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
892 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
893 dma_addr_t mapping;
894
895 if (unlikely(page == NULL))
896 return -ENOMEM;
897
898 mapping = dma_map_page(&bp->pdev->dev, page, 0,
899 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
900 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
901 __free_pages(page, PAGES_PER_SGE_SHIFT);
902 return -ENOMEM;
903 }
904
905 sw_buf->page = page;
906 dma_unmap_addr_set(sw_buf, mapping, mapping);
907
908 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
909 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
910
911 return 0;
912}
913
914static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
915 struct bnx2x_fastpath *fp, u16 index)
916{
917 struct sk_buff *skb;
918 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
919 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
920 dma_addr_t mapping;
921
922 skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
923 if (unlikely(skb == NULL))
924 return -ENOMEM;
925
926 mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
927 DMA_FROM_DEVICE);
928 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
929 dev_kfree_skb_any(skb);
930 return -ENOMEM;
931 }
932
933 rx_buf->skb = skb;
934 dma_unmap_addr_set(rx_buf, mapping, mapping);
935
936 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
937 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
938
939 return 0;
940}
941
942/* note that we are not allocating a new skb,
943 * we are just moving one from cons to prod
944 * we are not creating a new mapping,
945 * so there is no need to check for dma_mapping_error().
946 */
947static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
948 u16 cons, u16 prod)
949{
950 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
951 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
952 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
953 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
954
955 dma_unmap_addr_set(prod_rx_buf, mapping,
956 dma_unmap_addr(cons_rx_buf, mapping));
957 prod_rx_buf->skb = cons_rx_buf->skb;
958 *prod_bd = *cons_bd;
959}
960
961/************************* Init ******************************************/
962
963/**
964 * bnx2x_func_start - init function
965 *
966 * @bp: driver handle
967 *
968 * Must be called before sending CLIENT_SETUP for the first client.
969 */
970static inline int bnx2x_func_start(struct bnx2x *bp)
971{
972 struct bnx2x_func_state_params func_params = {0};
973 struct bnx2x_func_start_params *start_params =
974 &func_params.params.start;
975
976 /* Prepare parameters for function state transitions */
977 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
978
979 func_params.f_obj = &bp->func_obj;
980 func_params.cmd = BNX2X_F_CMD_START;
981
982 /* Function parameters */
983 start_params->mf_mode = bp->mf_mode;
984 start_params->sd_vlan_tag = bp->mf_ov;
985 if (CHIP_IS_E1x(bp))
986 start_params->network_cos_mode = OVERRIDE_COS;
987 else
988 start_params->network_cos_mode = STATIC_COS;
989
990 return bnx2x_func_state_change(bp, &func_params);
991}
992
993
994/**
995 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
996 *
997 * @fw_hi: pointer to upper part
998 * @fw_mid: pointer to middle part
999 * @fw_lo: pointer to lower part
1000 * @mac: pointer to MAC address
1001 */
1002static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
1003 u8 *mac)
1004{
1005 ((u8 *)fw_hi)[0] = mac[1];
1006 ((u8 *)fw_hi)[1] = mac[0];
1007 ((u8 *)fw_mid)[0] = mac[3];
1008 ((u8 *)fw_mid)[1] = mac[2];
1009 ((u8 *)fw_lo)[0] = mac[5];
1010 ((u8 *)fw_lo)[1] = mac[4];
1011}
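
A quick way to sanity-check the byte swizzling above is to feed in a well-known MAC and print the three u16 words. This standalone copy is for illustration only; on a little-endian host it prints the address bytes in network order, which is the point of the swap:

#include <stdint.h>
#include <stdio.h>

static void set_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid,
			    uint16_t *fw_lo, const uint8_t *mac)
{
	((uint8_t *)fw_hi)[0] = mac[1];
	((uint8_t *)fw_hi)[1] = mac[0];
	((uint8_t *)fw_mid)[0] = mac[3];
	((uint8_t *)fw_mid)[1] = mac[2];
	((uint8_t *)fw_lo)[0] = mac[5];
	((uint8_t *)fw_lo)[1] = mac[4];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t hi, mid, lo;

	set_fw_mac_addr(&hi, &mid, &lo, mac);
	/* on a little-endian host this prints "0011 2233 4455" */
	printf("%04x %04x %04x\n", hi, mid, lo);
	return 0;
}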
1012
1013static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1014 struct bnx2x_fastpath *fp, int last)
1015{
1016 int i;
1017
1018 if (fp->disable_tpa)
1019 return;
1020
1021 for (i = 0; i < last; i++)
1022 bnx2x_free_rx_sge(bp, fp, i);
1023}
1024
1025static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
1026 struct bnx2x_fastpath *fp, int last)
1027{
1028 int i;
1029
1030 for (i = 0; i < last; i++) {
1031 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1032 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1033 struct sk_buff *skb = first_buf->skb;
1034
1035 if (skb == NULL) {
1036 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1037 continue;
1038 }
1039 if (tpa_info->tpa_state == BNX2X_TPA_START)
1040 dma_unmap_single(&bp->pdev->dev,
1041 dma_unmap_addr(first_buf, mapping),
1042 fp->rx_buf_size, DMA_FROM_DEVICE);
1043 dev_kfree_skb(skb);
1044 first_buf->skb = NULL;
1045 }
1046}
1047
1048static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
1049{
1050 int i;
1051
1052 for (i = 1; i <= NUM_TX_RINGS; i++) {
1053 struct eth_tx_next_bd *tx_next_bd =
1054 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
1055
1056 tx_next_bd->addr_hi =
1057 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
1058 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
1059 tx_next_bd->addr_lo =
1060 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
1061 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
1062 }
1063
1064 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
1065 txdata->tx_db.data.zero_fill1 = 0;
1066 txdata->tx_db.data.prod = 0;
1067
1068 txdata->tx_pkt_prod = 0;
1069 txdata->tx_pkt_cons = 0;
1070 txdata->tx_bd_prod = 0;
1071 txdata->tx_bd_cons = 0;
1072 txdata->tx_pkt = 0;
1073}
1074
1075static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
1076{
1077 int i;
1078 u8 cos;
1079
1080 for_each_tx_queue(bp, i)
1081 for_each_cos_in_tx_queue(&bp->fp[i], cos)
1082 bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
1083}
1084
1085static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
1086{
1087 int i;
1088
1089 for (i = 1; i <= NUM_RX_RINGS; i++) {
1090 struct eth_rx_bd *rx_bd;
1091
1092 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
1093 rx_bd->addr_hi =
1094 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
1095 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
1096 rx_bd->addr_lo =
1097 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
1098 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
1099 }
1100}
1101
1102static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1103{
1104 int i;
1105
1106 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1107 struct eth_rx_sge *sge;
1108
1109 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1110 sge->addr_hi =
1111 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1112 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1113
1114 sge->addr_lo =
1115 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1116 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1117 }
1118}
1119
1120static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
1121{
1122 int i;
1123 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
1124 struct eth_rx_cqe_next_page *nextpg;
1125
1126 nextpg = (struct eth_rx_cqe_next_page *)
1127 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
1128 nextpg->addr_hi =
1129 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
1130 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
1131 nextpg->addr_lo =
1132 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
1133 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
1134 }
1135}
1136
1137/* Returns the number of actually allocated BDs */
1138static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
1139 int rx_ring_size)
1140{
1141 struct bnx2x *bp = fp->bp;
1142 u16 ring_prod, cqe_ring_prod;
1143 int i;
1144
1145 fp->rx_comp_cons = 0;
1146 cqe_ring_prod = ring_prod = 0;
1147
 1148	/* This routine is called only during function init, so
1149 * fp->eth_q_stats.rx_skb_alloc_failed = 0
1150 */
1151 for (i = 0; i < rx_ring_size; i++) {
1152 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
1153 fp->eth_q_stats.rx_skb_alloc_failed++;
1154 continue;
1155 }
1156 ring_prod = NEXT_RX_IDX(ring_prod);
1157 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
1158 WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
1159 }
1160
1161 if (fp->eth_q_stats.rx_skb_alloc_failed)
1162 BNX2X_ERR("was only able to allocate "
1163 "%d rx skbs on queue[%d]\n",
1164 (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);
1165
1166 fp->rx_bd_prod = ring_prod;
1167 /* Limit the CQE producer by the CQE ring size */
1168 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
1169 cqe_ring_prod);
1170 fp->rx_pkt = fp->rx_calls = 0;
1171
1172 return i - fp->eth_q_stats.rx_skb_alloc_failed;
1173}
1174
1175/* Statistics ID are global per chip/path, while Client IDs for E1x are per
1176 * port.
1177 */
1178static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
1179{
1180 if (!CHIP_IS_E1x(fp->bp))
1181 return fp->cl_id;
1182 else
1183 return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
1184}
1185
1186static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
1187 bnx2x_obj_type obj_type)
1188{
1189 struct bnx2x *bp = fp->bp;
1190
1191 /* Configure classification DBs */
1192 bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
1193 BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
1194 bnx2x_sp_mapping(bp, mac_rdata),
1195 BNX2X_FILTER_MAC_PENDING,
1196 &bp->sp_state, obj_type,
1197 &bp->macs_pool);
1198}
1199
1200/**
1201 * bnx2x_get_path_func_num - get number of active functions
1202 *
1203 * @bp: driver handle
1204 *
1205 * Calculates the number of active (not hidden) functions on the
1206 * current path.
1207 */
1208static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
1209{
1210 u8 func_num = 0, i;
1211
1212 /* 57710 has only one function per-port */
1213 if (CHIP_IS_E1(bp))
1214 return 1;
1215
1216 /* Calculate a number of functions enabled on the current
1217 * PATH/PORT.
1218 */
1219 if (CHIP_REV_IS_SLOW(bp)) {
1220 if (IS_MF(bp))
1221 func_num = 4;
1222 else
1223 func_num = 2;
1224 } else {
1225 for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
1226 u32 func_config =
1227 MF_CFG_RD(bp,
1228 func_mf_config[BP_PORT(bp) + 2 * i].
1229 config);
1230 func_num +=
1231 ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
1232 }
1233 }
1234
1235 WARN_ON(!func_num);
1236
1237 return func_num;
1238}
1239
1240static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
1241{
1242 /* RX_MODE controlling object */
1243 bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
1244
1245 /* multicast configuration controlling object */
1246 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
1247 BP_FUNC(bp), BP_FUNC(bp),
1248 bnx2x_sp(bp, mcast_rdata),
1249 bnx2x_sp_mapping(bp, mcast_rdata),
1250 BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
1251 BNX2X_OBJ_TYPE_RX);
1252
1253 /* Setup CAM credit pools */
1254 bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
1255 bnx2x_get_path_func_num(bp));
1256
1257 /* RSS configuration object */
1258 bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
1259 bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
1260 bnx2x_sp(bp, rss_rdata),
1261 bnx2x_sp_mapping(bp, rss_rdata),
1262 BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
1263 BNX2X_OBJ_TYPE_RX);
1264}
1265
1266static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
1267{
1268 if (CHIP_IS_E1x(fp->bp))
1269 return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
1270 else
1271 return fp->cl_id;
1272}
1273
1274static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
1275{
1276 struct bnx2x *bp = fp->bp;
1277
1278 if (!CHIP_IS_E1x(bp))
1279 return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
1280 else
1281 return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
1282}
1283
1284static inline void bnx2x_init_txdata(struct bnx2x *bp,
1285 struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
1286 __le16 *tx_cons_sb)
1287{
1288 txdata->cid = cid;
1289 txdata->txq_index = txq_index;
1290 txdata->tx_cons_sb = tx_cons_sb;
1291
 1292	DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n",
1293 txdata->cid, txdata->txq_index);
1294}
1295
1296#ifdef BCM_CNIC
1297static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
1298{
1299 return bp->cnic_base_cl_id + cl_idx +
1300 (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
1301}
1302
1303static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
1304{
1305
1306 /* the 'first' id is allocated for the cnic */
1307 return bp->base_fw_ndsb;
1308}
1309
1310static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
1311{
1312 return bp->igu_base_sb;
1313}
1314
1315
1316static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
1317{
1318 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
1319 unsigned long q_type = 0;
1320
1321 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
1322 BNX2X_FCOE_ETH_CL_ID_IDX);
 1323	/* Current BNX2X_FCOE_ETH_CID definition implies no more than
1324 * 16 ETH clients per function when CNIC is enabled!
1325 *
1326 * Fix it ASAP!!!
1327 */
1328 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
1329 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
1330 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
1331 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
1332
1333 bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
1334 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
1335
 1336	DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index);
1337
 1338	/* qZone id equals the FW (per path) client id */
1339 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
1340 /* init shortcut */
1341 bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
1342 bnx2x_rx_ustorm_prods_offset(fp);
1343
1344 /* Configure Queue State object */
1345 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
1346 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
1347
1348 /* No multi-CoS for FCoE L2 client */
1349 BUG_ON(fp->max_cos != 1);
1350
1351 bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
1352 BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
1353 bnx2x_sp_mapping(bp, q_rdata), q_type);
1354
1355 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
1356 "igu_sb %d\n",
1357 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
1358 fp->igu_sb_id);
1359}
1360#endif
1361
1362static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
1363 struct bnx2x_fp_txdata *txdata)
1364{
1365 int cnt = 1000;
1366
1367 while (bnx2x_has_tx_work_unload(txdata)) {
1368 if (!cnt) {
1369 BNX2X_ERR("timeout waiting for queue[%d]: "
1370 "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
1371 txdata->txq_index, txdata->tx_pkt_prod,
1372 txdata->tx_pkt_cons);
1373#ifdef BNX2X_STOP_ON_ERROR
1374 bnx2x_panic();
1375 return -EBUSY;
1376#else
1377 break;
1378#endif
1379 }
1380 cnt--;
1381 usleep_range(1000, 1000);
1382 }
1383
1384 return 0;
1385}
1386
1387int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
1388
1389static inline void __storm_memset_struct(struct bnx2x *bp,
1390 u32 addr, size_t size, u32 *data)
1391{
1392 int i;
1393 for (i = 0; i < size/4; i++)
1394 REG_WR(bp, addr + (i * 4), data[i]);
1395}
1396
1397static inline void storm_memset_func_cfg(struct bnx2x *bp,
1398 struct tstorm_eth_function_common_config *tcfg,
1399 u16 abs_fid)
1400{
1401 size_t size = sizeof(struct tstorm_eth_function_common_config);
1402
1403 u32 addr = BAR_TSTRORM_INTMEM +
1404 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
1405
1406 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
1407}
1408
1409static inline void storm_memset_cmng(struct bnx2x *bp,
1410 struct cmng_struct_per_port *cmng,
1411 u8 port)
1412{
1413 size_t size = sizeof(struct cmng_struct_per_port);
1414
1415 u32 addr = BAR_XSTRORM_INTMEM +
1416 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
1417
1418 __storm_memset_struct(bp, addr, size, (u32 *)cmng);
1419}
1420
1421/**
1422 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
1423 *
1424 * @bp: driver handle
1425 * @mask: bits that need to be cleared
1426 */
1427static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
1428{
1429 int tout = 5000; /* Wait for 5 secs tops */
1430
1431 while (tout--) {
1432 smp_mb();
1433 netif_addr_lock_bh(bp->dev);
1434 if (!(bp->sp_state & mask)) {
1435 netif_addr_unlock_bh(bp->dev);
1436 return true;
1437 }
1438 netif_addr_unlock_bh(bp->dev);
1439
1440 usleep_range(1000, 1000);
1441 }
1442
1443 smp_mb();
1444
1445 netif_addr_lock_bh(bp->dev);
1446 if (bp->sp_state & mask) {
1447 BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, "
1448 "mask 0x%lx\n", bp->sp_state, mask);
1449 netif_addr_unlock_bh(bp->dev);
1450 return false;
1451 }
1452 netif_addr_unlock_bh(bp->dev);
1453
1454 return true;
1455}
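
The loop above is a bounded poll with a final locked re-check, which closes the race where the bits clear between the last sleep and the timeout verdict. A distilled sketch of the pattern (locking and sleeping stripped out for illustration):

#include <stdbool.h>

/* poll until (*state & mask) clears, for at most 'tout' iterations; the
 * final re-check mirrors bnx2x_wait_sp_comp()'s post-loop test, so a clear
 * that lands on the very last iteration still counts as success
 */
static bool wait_bits_clear(const volatile unsigned long *state,
			    unsigned long mask, int tout)
{
	while (tout--) {
		if (!(*state & mask))
			return true;
		/* the driver sleeps here: usleep_range(1000, 1000) */
	}
	return !(*state & mask);
}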
1456
1457/**
1458 * bnx2x_set_ctx_validation - set CDU context validation values
1459 *
1460 * @bp: driver handle
1461 * @cxt: context of the connection on the host memory
1462 * @cid: SW CID of the connection to be configured
1463 */
1464void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
1465 u32 cid);
1466
1467void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
1468 u8 sb_index, u8 disable, u16 usec);
1469void bnx2x_acquire_phy_lock(struct bnx2x *bp);
1470void bnx2x_release_phy_lock(struct bnx2x *bp);
1471
1472/**
1473 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
1474 *
1475 * @bp: driver handle
1476 * @mf_cfg: MF configuration
1477 *
1478 */
1479static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
1480{
1481 u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1482 FUNC_MF_CFG_MAX_BW_SHIFT;
1483 if (!max_cfg) {
1484 BNX2X_ERR("Illegal configuration detected for Max BW - "
1485 "using 100 instead\n");
1486 max_cfg = 100;
1487 }
1488 return max_cfg;
1489}
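
Extracting MAX BW is a plain mask-and-shift with a fallback for an illegal zero. The sketch below mirrors it with hedged constants (the real FUNC_MF_CFG_MAX_BW_MASK/SHIFT values come from bnx2x_hsi.h and are assumed here to select the top byte):

#include <stdint.h>
#include <stdio.h>

/* assumed layout: MAX BW occupies the top byte of the MF config word */
#define MAX_BW_MASK	0xff000000
#define MAX_BW_SHIFT	24

static uint16_t extract_max_cfg(uint32_t mf_cfg)
{
	uint16_t max_cfg = (mf_cfg & MAX_BW_MASK) >> MAX_BW_SHIFT;

	return max_cfg ? max_cfg : 100;	/* illegal zero falls back to 100 */
}

int main(void)
{
	printf("%u\n", extract_max_cfg(0x32000000));	/* 50 */
	printf("%u\n", extract_max_cfg(0x00000000));	/* fallback: 100 */
	return 0;
}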
1490
1491#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
new file mode 100644
index 00000000000..0b4acf67e0c
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -0,0 +1,2508 @@
1/* bnx2x_dcb.c: Broadcom Everest network driver.
2 *
3 * Copyright 2009-2011 Broadcom Corporation
4 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Dmitry Kravkov
17 *
18 */
19#include <linux/netdevice.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/rtnetlink.h>
23#include <net/dcbnl.h>
24
25#include "bnx2x.h"
26#include "bnx2x_cmn.h"
27#include "bnx2x_dcb.h"
28
29/* forward declarations of dcbx related functions */
30static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
31static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
32static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
33static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
34static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
35 u32 *set_configuration_ets_pg,
36 u32 *pri_pg_tbl);
37static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
38 u32 *pg_pri_orginal_spread,
39 struct pg_help_data *help_data);
40static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
41 struct pg_help_data *help_data,
42 struct dcbx_ets_feature *ets,
43 u32 *pg_pri_orginal_spread);
44static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
45 struct cos_help_data *cos_data,
46 u32 *pg_pri_orginal_spread,
47 struct dcbx_ets_feature *ets);
48static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
49 struct bnx2x_func_tx_start_params*);
50
51/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */
52static void bnx2x_read_data(struct bnx2x *bp, u32 *buff,
53 u32 addr, u32 len)
54{
55 int i;
56 for (i = 0; i < len; i += 4, buff++)
57 *buff = REG_RD(bp, addr + i);
58}
59
60static void bnx2x_write_data(struct bnx2x *bp, u32 *buff,
61 u32 addr, u32 len)
62{
63 int i;
64 for (i = 0; i < len; i += 4, buff++)
65 REG_WR(bp, addr + i, *buff);
66}
67
68static void bnx2x_pfc_set(struct bnx2x *bp)
69{
70 struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
71 u32 pri_bit, val = 0;
72 int i;
73
74 pfc_params.num_of_rx_cos_priority_mask =
75 bp->dcbx_port_params.ets.num_of_cos;
76
77 /* Tx COS configuration */
78 for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
79 /*
  80		 * We configure only the pauseable bits (non-pauseable bits
  81		 * aren't configured at all); this avoids false pauses from
  82		 * the network
83 */
84 pfc_params.rx_cos_priority_mask[i] =
85 bp->dcbx_port_params.ets.cos_params[i].pri_bitmask
86 & DCBX_PFC_PRI_PAUSE_MASK(bp);
87
88 /*
89 * Rx COS configuration
  90	 * Changing the PFC RX configuration:
  91	 * RX COS0 is always configured as lossy and COS1 as lossless
92 */
93 for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
94 pri_bit = 1 << i;
95
96 if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
97 val |= 1 << (i * 4);
98 }
99
100 pfc_params.pkt_priority_to_cos = val;
101
102 /* RX COS0 */
103 pfc_params.llfc_low_priority_classes = 0;
104 /* RX COS1 */
105 pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
106
107 /* BRB configuration */
108 pfc_params.cos0_pauseable = false;
109 pfc_params.cos1_pauseable = true;
110
111 bnx2x_acquire_phy_lock(bp);
112 bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
113 bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params);
114 bnx2x_release_phy_lock(bp);
115}
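
The val loop above packs one 4-bit CoS selector per priority into pkt_priority_to_cos: pauseable priorities get CoS 1, everything else stays CoS 0. A standalone illustration with a made-up pause mask:

#include <stdint.h>
#include <stdio.h>

#define MAX_PFC_PRIORITIES 8

int main(void)
{
	uint32_t pause_mask = 0x28;	/* say priorities 3 and 5 are pauseable */
	uint32_t val = 0;
	int i;

	/* one nibble per priority: 1 = lossless COS1, 0 = lossy COS0 */
	for (i = 0; i < MAX_PFC_PRIORITIES; i++)
		if ((1u << i) & pause_mask)
			val |= 1u << (i * 4);

	printf("pkt_priority_to_cos = 0x%08x\n", val);	/* 0x00101000 */
	return 0;
}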
116
117static void bnx2x_pfc_clear(struct bnx2x *bp)
118{
119 struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
120 nig_params.pause_enable = 1;
121#ifdef BNX2X_SAFC
122 if (bp->flags & SAFC_TX_FLAG) {
123 u32 high = 0, low = 0;
124 int i;
125
126 for (i = 0; i < BNX2X_MAX_PRIORITY; i++) {
127 if (bp->pri_map[i] == 1)
128 high |= (1 << i);
129 if (bp->pri_map[i] == 0)
130 low |= (1 << i);
131 }
132
 133		nig_params.llfc_high_priority_classes = high;
134 nig_params.llfc_low_priority_classes = low;
135
136 nig_params.pause_enable = 0;
137 nig_params.llfc_enable = 1;
138 nig_params.llfc_out_en = 1;
139 }
140#endif /* BNX2X_SAFC */
141 bnx2x_acquire_phy_lock(bp);
142 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
143 bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
144 bnx2x_release_phy_lock(bp);
145}
146
147static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
148 struct dcbx_features *features,
149 u32 error)
150{
151 u8 i = 0;
152 DP(NETIF_MSG_LINK, "local_mib.error %x\n", error);
153
154 /* PG */
155 DP(NETIF_MSG_LINK,
156 "local_mib.features.ets.enabled %x\n", features->ets.enabled);
157 for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++)
158 DP(NETIF_MSG_LINK,
159 "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i,
160 DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i));
161 for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
162 DP(NETIF_MSG_LINK,
163 "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i,
164 DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
165
166 /* pfc */
167 DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n",
168 features->pfc.pri_en_bitmap);
169 DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n",
170 features->pfc.pfc_caps);
171 DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n",
172 features->pfc.enabled);
173
174 DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n",
175 features->app.default_pri);
176 DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n",
177 features->app.tc_supported);
178 DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n",
179 features->app.enabled);
180 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
181 DP(NETIF_MSG_LINK,
182 "dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
183 i, features->app.app_pri_tbl[i].app_id);
184 DP(NETIF_MSG_LINK,
185 "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
186 i, features->app.app_pri_tbl[i].pri_bitmap);
187 DP(NETIF_MSG_LINK,
188 "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
189 i, features->app.app_pri_tbl[i].appBitfield);
190 }
191}
192
193static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp,
194 u8 pri_bitmap,
195 u8 llfc_traf_type)
196{
197 u32 pri = MAX_PFC_PRIORITIES;
198 u32 index = MAX_PFC_PRIORITIES - 1;
199 u32 pri_mask;
200 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
201
202 /* Choose the highest priority */
203 while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) {
204 pri_mask = 1 << index;
205 if (GET_FLAGS(pri_bitmap, pri_mask))
 206			pri = index;
207 index--;
208 }
209
210 if (pri < MAX_PFC_PRIORITIES)
211 ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri);
212}
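
The while loop above is a hand-rolled highest-set-bit scan over the 8-bit priority bitmap; note it never tests index 0, which is harmless only because priority 0 is already the table default. A hedged equivalent in terms of a find-last-set helper (the kernel's fls() would do; a portable stand-in is used here):

#include <stdio.h>

/* portable stand-in for fls(): 1-based index of highest set bit, 0 if none */
static int fls_u8(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

/* highest PFC priority in a bitmap, or -1; like the loop above, bit 0
 * alone selects nothing (priority 0 is the pre-set table default)
 */
static int highest_pri(unsigned char pri_bitmap)
{
	int pri = fls_u8(pri_bitmap) - 1;

	return pri > 0 ? pri : -1;
}

int main(void)
{
	printf("%d\n", highest_pri(0x28));	/* bits 3 and 5 set -> 5 */
	printf("%d\n", highest_pri(0x01));	/* bit 0 only -> -1 */
	return 0;
}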
213
214static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
215 struct dcbx_app_priority_feature *app,
216 u32 error) {
217 u8 index;
218 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
219
220 if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
221 DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
222
223 if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH))
224 DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n");
225
226 if (app->enabled &&
227 !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) {
228
229 bp->dcbx_port_params.app.enabled = true;
230
231 for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
232 ttp[index] = 0;
233
234 if (app->default_pri < MAX_PFC_PRIORITIES)
235 ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri;
236
237 for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
238 struct dcbx_app_priority_entry *entry =
239 app->app_pri_tbl;
240
241 if (GET_FLAGS(entry[index].appBitfield,
242 DCBX_APP_SF_ETH_TYPE) &&
243 ETH_TYPE_FCOE == entry[index].app_id)
244 bnx2x_dcbx_get_ap_priority(bp,
245 entry[index].pri_bitmap,
246 LLFC_TRAFFIC_TYPE_FCOE);
247
248 if (GET_FLAGS(entry[index].appBitfield,
249 DCBX_APP_SF_PORT) &&
250 TCP_PORT_ISCSI == entry[index].app_id)
251 bnx2x_dcbx_get_ap_priority(bp,
252 entry[index].pri_bitmap,
253 LLFC_TRAFFIC_TYPE_ISCSI);
254 }
255 } else {
256 DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n");
257 bp->dcbx_port_params.app.enabled = false;
258 for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
259 ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
260 }
261}
262
263static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
264 struct dcbx_ets_feature *ets,
265 u32 error) {
266 int i = 0;
267 u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0};
268 struct pg_help_data pg_help_data;
269 struct bnx2x_dcbx_cos_params *cos_params =
270 bp->dcbx_port_params.ets.cos_params;
271
272 memset(&pg_help_data, 0, sizeof(struct pg_help_data));
273
274
275 if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
276 DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n");
277
278
279 /* Clean up old settings of ets on COS */
280 for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) {
281 cos_params[i].pauseable = false;
282 cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID;
283 cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
284 cos_params[i].pri_bitmask = 0;
285 }
286
287 if (bp->dcbx_port_params.app.enabled &&
288 !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) &&
289 ets->enabled) {
290 DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n");
291 bp->dcbx_port_params.ets.enabled = true;
292
293 bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
294 pg_pri_orginal_spread,
295 ets->pri_pg_tbl);
296
297 bnx2x_dcbx_get_num_pg_traf_type(bp,
298 pg_pri_orginal_spread,
299 &pg_help_data);
300
301 bnx2x_dcbx_fill_cos_params(bp, &pg_help_data,
302 ets, pg_pri_orginal_spread);
303
304 } else {
305 DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n");
306 bp->dcbx_port_params.ets.enabled = false;
307 ets->pri_pg_tbl[0] = 0;
308
309 for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++)
310 DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1);
311 }
312}
313
314static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
315 struct dcbx_pfc_feature *pfc, u32 error)
316{
317
318 if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
319 DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
320
321 if (bp->dcbx_port_params.app.enabled &&
322 !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) &&
323 pfc->enabled) {
324 bp->dcbx_port_params.pfc.enabled = true;
325 bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
326 ~(pfc->pri_en_bitmap);
327 } else {
328 DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n");
329 bp->dcbx_port_params.pfc.enabled = false;
330 bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
331 }
332}
333
334/* maps unmapped priorities to the same COS as L2 */
335static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
336{
337 int i;
338 u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */
339 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
340 u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW];
341 struct bnx2x_dcbx_cos_params *cos_params =
342 bp->dcbx_port_params.ets.cos_params;
343
344 /* get unmapped priorities by clearing mapped bits */
345 for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
346 unmapped &= ~(1 << ttp[i]);
347
348 /* find cos for nw prio and extend it with unmapped */
349 for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
350 if (cos_params[i].pri_bitmask & nw_prio) {
351 /* extend the bitmask with unmapped */
352 DP(NETIF_MSG_LINK,
 353			   "cos %d extended with 0x%08x\n", i, unmapped);
354 cos_params[i].pri_bitmask |= unmapped;
355 break;
356 }
357 }
358}
359
360static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
361 struct dcbx_features *features,
362 u32 error)
363{
364 bnx2x_dcbx_get_ap_feature(bp, &features->app, error);
365
366 bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
367
368 bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
369
370 bnx2x_dcbx_map_nw(bp);
371}
372
373#define DCBX_LOCAL_MIB_MAX_TRY_READ (100)
374static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
375 u32 *base_mib_addr,
376 u32 offset,
377 int read_mib_type)
378{
379 int max_try_read = 0;
380 u32 mib_size, prefix_seq_num, suffix_seq_num;
 381	struct lldp_remote_mib *remote_mib;
382 struct lldp_local_mib *local_mib;
383
384
385 switch (read_mib_type) {
386 case DCBX_READ_LOCAL_MIB:
387 mib_size = sizeof(struct lldp_local_mib);
388 break;
389 case DCBX_READ_REMOTE_MIB:
390 mib_size = sizeof(struct lldp_remote_mib);
391 break;
392 default:
393 return 1; /*error*/
394 }
395
396 offset += BP_PORT(bp) * mib_size;
397
398 do {
399 bnx2x_read_data(bp, base_mib_addr, offset, mib_size);
400
401 max_try_read++;
402
403 switch (read_mib_type) {
404 case DCBX_READ_LOCAL_MIB:
405 local_mib = (struct lldp_local_mib *) base_mib_addr;
406 prefix_seq_num = local_mib->prefix_seq_num;
407 suffix_seq_num = local_mib->suffix_seq_num;
408 break;
409 case DCBX_READ_REMOTE_MIB:
410 remote_mib = (struct lldp_remote_mib *) base_mib_addr;
411 prefix_seq_num = remote_mib->prefix_seq_num;
412 suffix_seq_num = remote_mib->suffix_seq_num;
413 break;
414 default:
415 return 1; /*error*/
416 }
417 } while ((prefix_seq_num != suffix_seq_num) &&
418 (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ));
419
420 if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) {
421 BNX2X_ERR("MIB could not be read\n");
422 return 1;
423 }
424
425 return 0;
426}
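/* The prefix/suffix sequence numbers implement a seqlock-like torn-read
 * guard: the MCP bumps both counters around every MIB update, so a copy
 * that observes prefix_seq_num == suffix_seq_num saw a consistent
 * snapshot. A minimal sketch of the same reader-side pattern (names
 * hypothetical, not driver code):
 *
 *	do {
 *		copy_mib(&snapshot, shmem_mib);
 *	} while (snapshot.prefix_seq_num != snapshot.suffix_seq_num &&
 *		 ++tries < max_tries);
 */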
427
428static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
429{
430 if (bp->dcbx_port_params.pfc.enabled &&
431 !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
432 /*
433 * 1. Fills up common PFC structures if required
434 * 2. Configure NIG, MAC and BRB via the elink
435 */
436 bnx2x_pfc_set(bp);
437 else
438 bnx2x_pfc_clear(bp);
439}
440
441static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
442{
443 struct bnx2x_func_state_params func_params = {0};
444
445 func_params.f_obj = &bp->func_obj;
446 func_params.cmd = BNX2X_F_CMD_TX_STOP;
447
448 DP(NETIF_MSG_LINK, "STOP TRAFFIC\n");
449 return bnx2x_func_state_change(bp, &func_params);
450}
451
452static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
453{
454 struct bnx2x_func_state_params func_params = {0};
455 struct bnx2x_func_tx_start_params *tx_params =
456 &func_params.params.tx_start;
457
458 func_params.f_obj = &bp->func_obj;
459 func_params.cmd = BNX2X_F_CMD_TX_START;
460
461 bnx2x_dcbx_fw_struct(bp, tx_params);
462
463 DP(NETIF_MSG_LINK, "START TRAFFIC\n");
464 return bnx2x_func_state_change(bp, &func_params);
465}
466
467static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
468{
469 struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
470 int rc = 0;
471
472 if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) {
473 BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos);
474 return;
475 }
476
477 /* valid COS entries */
478 if (ets->num_of_cos == 1) /* no ETS */
479 return;
480
481 /* sanity */
482 if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) &&
483 (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
484 ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) &&
485 (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
486			BNX2X_ERR("all COS should have at least bw_limit or strict "
487				"ets->cos_params[0].strict=%x "
488				"ets->cos_params[0].bw_tbl=%x "
489				"ets->cos_params[1].strict=%x "
490				"ets->cos_params[1].bw_tbl=%x\n",
491 ets->cos_params[0].strict,
492 ets->cos_params[0].bw_tbl,
493 ets->cos_params[1].strict,
494 ets->cos_params[1].bw_tbl);
495 return;
496 }
497 /* If we join a group and there is bw_tbl and strict then bw rules */
498 if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) &&
499 (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) {
500 u32 bw_tbl_0 = ets->cos_params[0].bw_tbl;
501 u32 bw_tbl_1 = ets->cos_params[1].bw_tbl;
502		/* Do not allow a 0-100 configuration
503		 * since the PBF does not support it;
504		 * force 1-99 instead.
505		 */
506 if (bw_tbl_0 == 0) {
507 bw_tbl_0 = 1;
508 bw_tbl_1 = 99;
509 } else if (bw_tbl_1 == 0) {
510 bw_tbl_1 = 1;
511 bw_tbl_0 = 99;
512 }
513
514 bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
515 } else {
516 if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST)
517 rc = bnx2x_ets_strict(&bp->link_params, 0);
518 else if (ets->cos_params[1].strict
519 == BNX2X_DCBX_STRICT_COS_HIGHEST)
520 rc = bnx2x_ets_strict(&bp->link_params, 1);
521 if (rc)
522 BNX2X_ERR("update_ets_params failed\n");
523 }
524}
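/* Worked example (illustrative): a negotiated split of 0%/100% across
 * the two COSes is rewritten to 1%/99% before being handed to
 * bnx2x_ets_bw_limit(), since the PBF cannot program a 0% class;
 * 100%/0% likewise becomes 99%/1%.
 */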
525
526/*
527 * In E3B0 the configuration may have more than 2 COS.
528 */
529void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
530{
531 struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
532 struct bnx2x_ets_params ets_params = { 0 };
533 u8 i;
534
535 ets_params.num_of_cos = ets->num_of_cos;
536
537 for (i = 0; i < ets->num_of_cos; i++) {
538 /* COS is SP */
539 if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
540 if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
541				BNX2X_ERR("COS can't be both BW and SP\n");
542 return;
543 }
544
545 ets_params.cos[i].state = bnx2x_cos_state_strict;
546 ets_params.cos[i].params.sp_params.pri =
547 ets->cos_params[i].strict;
548 } else { /* COS is BW */
549 if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
550				BNX2X_ERR("COS must be either BW or SP\n");
551 return;
552 }
553 ets_params.cos[i].state = bnx2x_cos_state_bw;
554 ets_params.cos[i].params.bw_params.bw =
555 (u8)ets->cos_params[i].bw_tbl;
556 }
557 }
558
559 /* Configure the ETS in HW */
560 if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
561 &ets_params)) {
562 BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
563 bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
564 }
565}
566
567static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
568{
569 bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
570
571 if (!bp->dcbx_port_params.ets.enabled ||
572 (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
573 return;
574
575 if (CHIP_IS_E3B0(bp))
576 bnx2x_dcbx_update_ets_config(bp);
577 else
578 bnx2x_dcbx_2cos_limit_update_ets_config(bp);
579}
580
581#ifdef BCM_DCBNL
582static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
583{
584 struct lldp_remote_mib remote_mib = {0};
585 u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset);
586 int rc;
587
588 DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n",
589 dcbx_remote_mib_offset);
590
591 if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) {
592 BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n");
593 return -EINVAL;
594 }
595
596 rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset,
597 DCBX_READ_REMOTE_MIB);
598
599 if (rc) {
600		BNX2X_ERR("Failed to read remote mib from FW\n");
601 return rc;
602 }
603
604 /* save features and flags */
605 bp->dcbx_remote_feat = remote_mib.features;
606 bp->dcbx_remote_flags = remote_mib.flags;
607 return 0;
608}
609#endif
610
611static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
612{
613 struct lldp_local_mib local_mib = {0};
614 u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
615 int rc;
616
617 DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
618
619 if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
620 BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
621 return -EINVAL;
622 }
623
624 rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
625 DCBX_READ_LOCAL_MIB);
626
627 if (rc) {
628		BNX2X_ERR("Failed to read local mib from FW\n");
629 return rc;
630 }
631
632 /* save features and error */
633 bp->dcbx_local_feat = local_mib.features;
634 bp->dcbx_error = local_mib.error;
635 return 0;
636}
637
638
639#ifdef BCM_DCBNL
640static inline
641u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
642{
643 u8 pri;
644
645 /* Choose the highest priority */
646 for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
647 if (ent->pri_bitmap & (1 << pri))
648 break;
649 return pri;
650}
651
652static inline
653u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
654{
655 return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
656 DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
657 DCB_APP_IDTYPE_ETHTYPE;
658}
659
660int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
661{
662 int i, err = 0;
663
664 for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
665 struct dcbx_app_priority_entry *ent =
666 &bp->dcbx_local_feat.app.app_pri_tbl[i];
667
668 if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
669 u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
670
671 /* avoid invalid user-priority */
672 if (up) {
673 struct dcb_app app;
674 app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
675 app.protocol = ent->app_id;
676 app.priority = delall ? 0 : up;
677 err = dcb_setapp(bp->dev, &app);
678 }
679 }
680 }
681 return err;
682}
683#endif
684
685static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
686{
687 if (SHMEM2_HAS(bp, drv_flags)) {
688 u32 drv_flags;
689 bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
690 drv_flags = SHMEM2_RD(bp, drv_flags);
691
692 if (set)
693 SET_FLAGS(drv_flags, flags);
694 else
695 RESET_FLAGS(drv_flags, flags);
696
697 SHMEM2_WR(bp, drv_flags, drv_flags);
698 DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
699 bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
700 }
701}
702
703static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
704{
705 u8 prio, cos;
706 for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) {
707 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
708 if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
709 & (1 << prio)) {
710 bp->prio_to_cos[prio] = cos;
711 DP(NETIF_MSG_LINK,
712 "tx_mapping %d --> %d\n", prio, cos);
713 }
714 }
715 }
716
717	/* setup tc must be called under rtnl lock, but we can't take it here
718	 * as we are handling an attention on a work queue which must be
719	 * flushed in some rtnl-locked contexts (e.g. if down)
720 */
721 if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
722 schedule_delayed_work(&bp->sp_rtnl_task, 0);
723}
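/* Worked example (illustrative): with two COSes where
 * cos_params[0].pri_bitmask = 0x07 and cos_params[1].pri_bitmask = 0x08,
 * the loop yields prio_to_cos = {0, 0, 0, 1, ...}, i.e. priorities 0-2
 * are transmitted on COS 0 and priority 3 on COS 1.
 */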
724
725void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
726{
727 switch (state) {
728 case BNX2X_DCBX_STATE_NEG_RECEIVED:
729 {
730 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
731#ifdef BCM_DCBNL
732		/*
733		 * Delete app TLVs from dcbnl before reading new
734		 * negotiation results
735		 */
736 bnx2x_dcbnl_update_applist(bp, true);
737
738		/* Read remote mib if dcbx is in the FW */
739 if (bnx2x_dcbx_read_shmem_remote_mib(bp))
740 return;
741#endif
742 /* Read neg results if dcbx is in the FW */
743 if (bnx2x_dcbx_read_shmem_neg_results(bp))
744 return;
745
746 bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
747 bp->dcbx_error);
748
749 bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
750 bp->dcbx_error);
751
752 /* mark DCBX result for PMF migration */
753 bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
754#ifdef BCM_DCBNL
755		/*
756		 * Add new app TLVs to dcbnl
757		 */
758 bnx2x_dcbnl_update_applist(bp, false);
759#endif
760 bnx2x_dcbx_stop_hw_tx(bp);
761
762 /* reconfigure the netdevice with the results of the new
763 * dcbx negotiation.
764 */
765 bnx2x_dcbx_update_tc_mapping(bp);
766
767 return;
768 }
769 case BNX2X_DCBX_STATE_TX_PAUSED:
770 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
771 bnx2x_pfc_set_pfc(bp);
772
773 bnx2x_dcbx_update_ets_params(bp);
774 bnx2x_dcbx_resume_hw_tx(bp);
775 return;
776 case BNX2X_DCBX_STATE_TX_RELEASED:
777 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
778 bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
779#ifdef BCM_DCBNL
780 /*
781 * Send a notification for the new negotiated parameters
782 */
783 dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
784#endif
785 return;
786 default:
787 BNX2X_ERR("Unknown DCBX_STATE\n");
788 }
789}
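/* The three states above form the DCBX re-configuration sequence driven
 * by MCP attentions:
 *   NEG_RECEIVED - read the negotiated MIBs, derive driver parameters
 *                  and ask the FW to stop transmission;
 *   TX_PAUSED    - with traffic stopped, program PFC and ETS in HW and
 *                  ask the FW to resume transmission;
 *   TX_RELEASED  - acknowledge the MCP (DCBX_PMF_DRV_OK) and, when
 *                  dcbnl is built in, notify listeners of the new CEE
 *                  parameters.
 */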
790
791#define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \
792 BP_PORT(bp)*sizeof(struct lldp_admin_mib))
793
794static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
795 u32 dcbx_lldp_params_offset)
796{
797 struct lldp_admin_mib admin_mib;
798 u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
799 u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp);
800
801	/* shortcuts */
802 struct dcbx_features *af = &admin_mib.features;
803 struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;
804
805 memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));
806
807 /* Read the data first */
808 bnx2x_read_data(bp, (u32 *)&admin_mib, offset,
809 sizeof(struct lldp_admin_mib));
810
811 if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON)
812 SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
813 else
814 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
815
816 if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) {
817
818 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
819 admin_mib.ver_cfg_flags |=
820 (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
821 DCBX_CEE_VERSION_MASK;
822
823 af->ets.enabled = (u8)dp->admin_ets_enable;
824
825 af->pfc.enabled = (u8)dp->admin_pfc_enable;
826
827 /* FOR IEEE dp->admin_tc_supported_tx_enable */
828 if (dp->admin_ets_configuration_tx_enable)
829 SET_FLAGS(admin_mib.ver_cfg_flags,
830 DCBX_ETS_CONFIG_TX_ENABLED);
831 else
832 RESET_FLAGS(admin_mib.ver_cfg_flags,
833 DCBX_ETS_CONFIG_TX_ENABLED);
834 /* For IEEE admin_ets_recommendation_tx_enable */
835 if (dp->admin_pfc_tx_enable)
836 SET_FLAGS(admin_mib.ver_cfg_flags,
837 DCBX_PFC_CONFIG_TX_ENABLED);
838 else
839 RESET_FLAGS(admin_mib.ver_cfg_flags,
840 DCBX_PFC_CONFIG_TX_ENABLED);
841
842 if (dp->admin_application_priority_tx_enable)
843 SET_FLAGS(admin_mib.ver_cfg_flags,
844 DCBX_APP_CONFIG_TX_ENABLED);
845 else
846 RESET_FLAGS(admin_mib.ver_cfg_flags,
847 DCBX_APP_CONFIG_TX_ENABLED);
848
849 if (dp->admin_ets_willing)
850 SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
851 else
852 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
853 /* For IEEE admin_ets_reco_valid */
854 if (dp->admin_pfc_willing)
855 SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
856 else
857 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
858
859 if (dp->admin_app_priority_willing)
860 SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
861 else
862 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
863
864 for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) {
865 DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
866 (u8)dp->admin_configuration_bw_precentage[i]);
867
868 DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n",
869 i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
870 }
871
872 for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
873 DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
874 (u8)dp->admin_configuration_ets_pg[i]);
875
876 DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n",
877 i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
878 }
879
880			/* For IEEE admin_recommendation_bw_precentage
881			 * For IEEE admin_recommendation_ets_pg */
882 af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
883 for (i = 0; i < 4; i++) {
884 if (dp->admin_priority_app_table[i].valid) {
885 struct bnx2x_admin_priority_app_table *table =
886 dp->admin_priority_app_table;
887 if ((ETH_TYPE_FCOE == table[i].app_id) &&
888 (TRAFFIC_TYPE_ETH == table[i].traffic_type))
889 traf_type = FCOE_APP_IDX;
890 else if ((TCP_PORT_ISCSI == table[i].app_id) &&
891 (TRAFFIC_TYPE_PORT == table[i].traffic_type))
892 traf_type = ISCSI_APP_IDX;
893 else
894 traf_type = other_traf_type++;
895
896 af->app.app_pri_tbl[traf_type].app_id =
897 table[i].app_id;
898
899 af->app.app_pri_tbl[traf_type].pri_bitmap =
900 (u8)(1 << table[i].priority);
901
902 af->app.app_pri_tbl[traf_type].appBitfield =
903 (DCBX_APP_ENTRY_VALID);
904
905 af->app.app_pri_tbl[traf_type].appBitfield |=
906 (TRAFFIC_TYPE_ETH == table[i].traffic_type) ?
907 DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT;
908 }
909 }
910
911 af->app.default_pri = (u8)dp->admin_default_priority;
912
913 }
914
915 /* Write the data. */
916 bnx2x_write_data(bp, (u32 *)&admin_mib, offset,
917 sizeof(struct lldp_admin_mib));
918
919}
920
921void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
922{
923 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
924 bp->dcb_state = dcb_on;
925 bp->dcbx_enabled = dcbx_enabled;
926 } else {
927 bp->dcb_state = false;
928 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
929 }
930 DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n",
931 dcb_on ? "ON" : "OFF",
932 dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
933 dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
934 dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ?
935 "on-chip with negotiation" : "invalid");
936}
937
938void bnx2x_dcbx_init_params(struct bnx2x *bp)
939{
940 bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */
941 bp->dcbx_config_params.admin_ets_willing = 1;
942 bp->dcbx_config_params.admin_pfc_willing = 1;
943 bp->dcbx_config_params.overwrite_settings = 1;
944 bp->dcbx_config_params.admin_ets_enable = 1;
945 bp->dcbx_config_params.admin_pfc_enable = 1;
946 bp->dcbx_config_params.admin_tc_supported_tx_enable = 1;
947 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
948 bp->dcbx_config_params.admin_pfc_tx_enable = 1;
949 bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
950 bp->dcbx_config_params.admin_ets_reco_valid = 1;
951 bp->dcbx_config_params.admin_app_priority_willing = 1;
952	bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 0;
953 bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50;
954 bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50;
955 bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
956 bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
957 bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
958 bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
959 bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
960 bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1;
961 bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
962 bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
963 bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2;
964 bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
965 bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
966 bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
967 bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
968 bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0;
969 bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1;
970 bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2;
971 bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
972 bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7;
973 bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5;
974 bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6;
975 bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7;
976 bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
977 bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
978 bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
979 bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3;
980 bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4;
981 bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
982 bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
983 bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
984 bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */
985 bp->dcbx_config_params.admin_priority_app_table[0].valid = 1;
986 bp->dcbx_config_params.admin_priority_app_table[1].valid = 1;
987 bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
988 bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
989 bp->dcbx_config_params.admin_priority_app_table[0].priority = 3;
990 bp->dcbx_config_params.admin_priority_app_table[1].priority = 0;
991 bp->dcbx_config_params.admin_priority_app_table[2].priority = 0;
992 bp->dcbx_config_params.admin_priority_app_table[3].priority = 0;
993 bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0;
994 bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1;
995 bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0;
996 bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0;
997 bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906;
998 bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260;
999 bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0;
1000 bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0;
1001 bp->dcbx_config_params.admin_default_priority =
1002 bp->dcbx_config_params.admin_priority_app_table[1].priority;
1003}
1004
1005void bnx2x_dcbx_init(struct bnx2x *bp)
1006{
1007 u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
1008
1009 if (bp->dcbx_enabled <= 0)
1010 return;
1011
1012 /* validate:
1013 * chip of good for dcbx version,
1014 * dcb is wanted
1015 * the function is pmf
1016 * shmem2 contains DCBX support fields
1017 */
1018 DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
1019 bp->dcb_state, bp->port.pmf);
1020
1021 if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
1022 SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
1023 dcbx_lldp_params_offset =
1024 SHMEM2_RD(bp, dcbx_lldp_params_offset);
1025
1026 DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
1027 dcbx_lldp_params_offset);
1028
1029 bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
1030
1031 if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
1032 bnx2x_dcbx_admin_mib_updated_params(bp,
1033 dcbx_lldp_params_offset);
1034
1035 /* Let HW start negotiation */
1036 bnx2x_fw_command(bp,
1037 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
1038 }
1039 }
1040}
1041static void
1042bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
1043 struct bnx2x_func_tx_start_params *pfc_fw_cfg)
1044{
1045 u8 pri = 0;
1046 u8 cos = 0;
1047
1048 DP(NETIF_MSG_LINK,
1049 "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
1050 DP(NETIF_MSG_LINK,
1051 "pdev->params.dcbx_port_params.pfc."
1052 "priority_non_pauseable_mask %x\n",
1053 bp->dcbx_port_params.pfc.priority_non_pauseable_mask);
1054
1055 for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
1056 DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
1057 "cos_params[%d].pri_bitmask %x\n", cos,
1058 bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
1059
1060 DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
1061 "cos_params[%d].bw_tbl %x\n", cos,
1062 bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
1063
1064 DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
1065 "cos_params[%d].strict %x\n", cos,
1066 bp->dcbx_port_params.ets.cos_params[cos].strict);
1067
1068 DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
1069 "cos_params[%d].pauseable %x\n", cos,
1070 bp->dcbx_port_params.ets.cos_params[cos].pauseable);
1071 }
1072
1073 for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
1074 DP(NETIF_MSG_LINK,
1075 "pfc_fw_cfg->traffic_type_to_priority_cos[%d]."
1076 "priority %x\n", pri,
1077 pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
1078
1079 DP(NETIF_MSG_LINK,
1080 "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n",
1081 pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos);
1082 }
1083}
1084
1085/* fills help_data according to pg_info */
1086static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
1087 u32 *pg_pri_orginal_spread,
1088 struct pg_help_data *help_data)
1089{
1090 bool pg_found = false;
1091 u32 i, traf_type, add_traf_type, add_pg;
1092 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
1093	struct pg_entry_help_data *data = help_data->data; /* shortcut */
1094
1095 /* Set to invalid */
1096 for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
1097 data[i].pg = DCBX_ILLEGAL_PG;
1098
1099 for (add_traf_type = 0;
1100 add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) {
1101 pg_found = false;
1102 if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) {
1103 add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]];
1104 for (traf_type = 0;
1105 traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX;
1106 traf_type++) {
1107 if (data[traf_type].pg == add_pg) {
1108 if (!(data[traf_type].pg_priority &
1109 (1 << ttp[add_traf_type])))
1110 data[traf_type].
1111 num_of_dif_pri++;
1112 data[traf_type].pg_priority |=
1113 (1 << ttp[add_traf_type]);
1114 pg_found = true;
1115 break;
1116 }
1117 }
1118 if (false == pg_found) {
1119 data[help_data->num_of_pg].pg = add_pg;
1120 data[help_data->num_of_pg].pg_priority =
1121 (1 << ttp[add_traf_type]);
1122 data[help_data->num_of_pg].num_of_dif_pri = 1;
1123 help_data->num_of_pg++;
1124 }
1125 }
1126 DP(NETIF_MSG_LINK,
1127 "add_traf_type %d pg_found %s num_of_pg %d\n",
1128 add_traf_type, (false == pg_found) ? "NO" : "YES",
1129 help_data->num_of_pg);
1130 }
1131}
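/* Worked example (illustrative): with three traffic types mapped to
 * priorities {0, 3, 3} and pg_pri_orginal_spread assigning priority 0
 * to PG 0 and priority 3 to PG 1, the helper ends up with
 * num_of_pg = 2: data[0] = {pg 0, pg_priority 0x01, 1 distinct pri} and
 * data[1] = {pg 1, pg_priority 0x08, 1 distinct pri} - the second
 * traffic type on priority 3 joins the existing PG-1 entry.
 */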
1132
1133static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
1134 struct cos_help_data *cos_data,
1135 u32 pri_join_mask)
1136{
1137	/* Only one priority, hence only one COS */
1138 cos_data->data[0].pausable =
1139 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
1140 cos_data->data[0].pri_join_mask = pri_join_mask;
1141 cos_data->data[0].cos_bw = 100;
1142 cos_data->num_of_cos = 1;
1143}
1144
1145static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
1146 struct cos_entry_help_data *data,
1147 u8 pg_bw)
1148{
1149 if (data->cos_bw == DCBX_INVALID_COS_BW)
1150 data->cos_bw = pg_bw;
1151 else
1152 data->cos_bw += pg_bw;
1153}
1154
1155static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
1156 struct cos_help_data *cos_data,
1157 u32 *pg_pri_orginal_spread,
1158 struct dcbx_ets_feature *ets)
1159{
1160 u32 pri_tested = 0;
1161 u8 i = 0;
1162 u8 entry = 0;
1163 u8 pg_entry = 0;
1164 u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
1165
1166 cos_data->data[0].pausable = true;
1167 cos_data->data[1].pausable = false;
1168 cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
1169
1170 for (i = 0 ; i < num_of_pri ; i++) {
1171 pri_tested = 1 << bp->dcbx_port_params.
1172 app.traffic_type_priority[i];
1173
1174 if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) {
1175 cos_data->data[1].pri_join_mask |= pri_tested;
1176 entry = 1;
1177 } else {
1178 cos_data->data[0].pri_join_mask |= pri_tested;
1179 entry = 0;
1180 }
1181 pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params.
1182 app.traffic_type_priority[i]];
1183 /* There can be only one strict pg */
1184 if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES)
1185 bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry],
1186 DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
1187 else
1188			/* If we join a group and one member is strict
1189			 * then strict overrides the bw */
1190 cos_data->data[entry].strict =
1191 BNX2X_DCBX_STRICT_COS_HIGHEST;
1192 }
1193 if ((0 == cos_data->data[0].pri_join_mask) &&
1194 (0 == cos_data->data[1].pri_join_mask))
1195 BNX2X_ERR("dcbx error: Both groups must have priorities\n");
1196}
1197
1198
1199#ifndef POWER_OF_2
1200#define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1))))
1201#endif
1202
1203static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
1204 struct pg_help_data *pg_help_data,
1205 struct cos_help_data *cos_data,
1206 u32 pri_join_mask,
1207 u8 num_of_dif_pri)
1208{
1209 u8 i = 0;
1210 u32 pri_tested = 0;
1211 u32 pri_mask_without_pri = 0;
1212 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
1213 /*debug*/
1214 if (num_of_dif_pri == 1) {
1215 bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask);
1216 return;
1217 }
1218 /* single priority group */
1219 if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
1220 /* If there are both pauseable and non-pauseable priorities,
1221 * the pauseable priorities go to the first queue and
1222 * the non-pauseable priorities go to the second queue.
1223 */
1224 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
1225 /* Pauseable */
1226 cos_data->data[0].pausable = true;
1227 /* Non pauseable.*/
1228 cos_data->data[1].pausable = false;
1229
1230 if (2 == num_of_dif_pri) {
1231 cos_data->data[0].cos_bw = 50;
1232 cos_data->data[1].cos_bw = 50;
1233 }
1234
1235 if (3 == num_of_dif_pri) {
1236 if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp,
1237 pri_join_mask))) {
1238 cos_data->data[0].cos_bw = 33;
1239 cos_data->data[1].cos_bw = 67;
1240 } else {
1241 cos_data->data[0].cos_bw = 67;
1242 cos_data->data[1].cos_bw = 33;
1243 }
1244 }
1245
1246 } else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) {
1247 /* If there are only pauseable priorities,
1248 * then one/two priorities go to the first queue
1249 * and one priority goes to the second queue.
1250 */
1251 if (2 == num_of_dif_pri) {
1252 cos_data->data[0].cos_bw = 50;
1253 cos_data->data[1].cos_bw = 50;
1254 } else {
1255 cos_data->data[0].cos_bw = 67;
1256 cos_data->data[1].cos_bw = 33;
1257 }
1258 cos_data->data[1].pausable = true;
1259 cos_data->data[0].pausable = true;
1260 /* All priorities except FCOE */
1261 cos_data->data[0].pri_join_mask = (pri_join_mask &
1262 ((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE])));
1263 /* Only FCOE priority.*/
1264 cos_data->data[1].pri_join_mask =
1265 (1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]);
1266 } else
1267 /* If there are only non-pauseable priorities,
1268 * they will all go to the same queue.
1269 */
1270 bnx2x_dcbx_ets_disabled_entry_data(bp,
1271 cos_data, pri_join_mask);
1272 } else {
1273 /* priority group which is not BW limited (PG#15):*/
1274 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
1275 /* If there are both pauseable and non-pauseable
1276 * priorities, the pauseable priorities go to the first
1277 * queue and the non-pauseable priorities
1278 * go to the second queue.
1279 */
1280 if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
1281 DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
1282 cos_data->data[0].strict =
1283 BNX2X_DCBX_STRICT_COS_HIGHEST;
1284 cos_data->data[1].strict =
1285 BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
1286 BNX2X_DCBX_STRICT_COS_HIGHEST);
1287 } else {
1288 cos_data->data[0].strict =
1289 BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
1290 BNX2X_DCBX_STRICT_COS_HIGHEST);
1291 cos_data->data[1].strict =
1292 BNX2X_DCBX_STRICT_COS_HIGHEST;
1293 }
1294 /* Pauseable */
1295 cos_data->data[0].pausable = true;
1296 /* Non pause-able.*/
1297 cos_data->data[1].pausable = false;
1298 } else {
1299 /* If there are only pauseable priorities or
1300			 * only non-pauseable, the lower priorities go
1301			 * to the first queue and the higher priorities go
1302 * to the second queue.
1303 */
1304 cos_data->data[0].pausable =
1305 cos_data->data[1].pausable =
1306 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
1307
1308 for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) {
1309 pri_tested = 1 << bp->dcbx_port_params.
1310 app.traffic_type_priority[i];
1311 /* Remove priority tested */
1312 pri_mask_without_pri =
1313 (pri_join_mask & ((u8)(~pri_tested)));
1314 if (pri_mask_without_pri < pri_tested)
1315 break;
1316 }
1317
1318 if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX)
1319 BNX2X_ERR("Invalid value for pri_join_mask -"
1320 " could not find a priority\n");
1321
1322 cos_data->data[0].pri_join_mask = pri_mask_without_pri;
1323 cos_data->data[1].pri_join_mask = pri_tested;
1324 /* Both queues are strict priority,
1325 * and that with the highest priority
1326 * gets the highest strict priority in the arbiter.
1327 */
1328 cos_data->data[0].strict =
1329 BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
1330 BNX2X_DCBX_STRICT_COS_HIGHEST);
1331 cos_data->data[1].strict =
1332 BNX2X_DCBX_STRICT_COS_HIGHEST;
1333 }
1334 }
1335}
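/* Worked example (illustrative): a single BW-limited PG carrying three
 * distinct priorities of which exactly one is pauseable (the pauseable
 * set is a power of two) is split 33%/67% between the pauseable and
 * non-pauseable COS; with two pauseable priorities the split flips to
 * 67%/33%, and with only two distinct priorities it is 50%/50%.
 */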
1336
1337static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
1338 struct bnx2x *bp,
1339 struct pg_help_data *pg_help_data,
1340 struct dcbx_ets_feature *ets,
1341 struct cos_help_data *cos_data,
1342 u32 *pg_pri_orginal_spread,
1343 u32 pri_join_mask,
1344 u8 num_of_dif_pri)
1345{
1346 u8 i = 0;
1347 u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 };
1348
1349 /* If there are both pauseable and non-pauseable priorities,
1350 * the pauseable priorities go to the first queue and
1351 * the non-pauseable priorities go to the second queue.
1352 */
1353 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
1354 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
1355 pg_help_data->data[0].pg_priority) ||
1356 IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
1357 pg_help_data->data[1].pg_priority)) {
1358 /* If one PG contains both pauseable and
1359 * non-pauseable priorities then ETS is disabled.
1360 */
1361 bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data,
1362 pg_pri_orginal_spread, ets);
1363 bp->dcbx_port_params.ets.enabled = false;
1364 return;
1365 }
1366
1367 /* Pauseable */
1368 cos_data->data[0].pausable = true;
1369 /* Non pauseable. */
1370 cos_data->data[1].pausable = false;
1371 if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp,
1372 pg_help_data->data[0].pg_priority)) {
1373 /* 0 is pauseable */
1374 cos_data->data[0].pri_join_mask =
1375 pg_help_data->data[0].pg_priority;
1376 pg[0] = pg_help_data->data[0].pg;
1377 cos_data->data[1].pri_join_mask =
1378 pg_help_data->data[1].pg_priority;
1379 pg[1] = pg_help_data->data[1].pg;
1380 } else {/* 1 is pauseable */
1381 cos_data->data[0].pri_join_mask =
1382 pg_help_data->data[1].pg_priority;
1383 pg[0] = pg_help_data->data[1].pg;
1384 cos_data->data[1].pri_join_mask =
1385 pg_help_data->data[0].pg_priority;
1386 pg[1] = pg_help_data->data[0].pg;
1387 }
1388 } else {
1389 /* If there are only pauseable priorities or
1390 * only non-pauseable, each PG goes to a queue.
1391 */
1392 cos_data->data[0].pausable = cos_data->data[1].pausable =
1393 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
1394 cos_data->data[0].pri_join_mask =
1395 pg_help_data->data[0].pg_priority;
1396 pg[0] = pg_help_data->data[0].pg;
1397 cos_data->data[1].pri_join_mask =
1398 pg_help_data->data[1].pg_priority;
1399 pg[1] = pg_help_data->data[1].pg;
1400 }
1401
1402 /* There can be only one strict pg */
1403 for (i = 0 ; i < ARRAY_SIZE(pg); i++) {
1404 if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
1405 cos_data->data[i].cos_bw =
1406 DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
1407 else
1408 cos_data->data[i].strict =
1409 BNX2X_DCBX_STRICT_COS_HIGHEST;
1410 }
1411}
1412
1413static int bnx2x_dcbx_join_pgs(
1414 struct bnx2x *bp,
1415 struct dcbx_ets_feature *ets,
1416 struct pg_help_data *pg_help_data,
1417 u8 required_num_of_pg)
1418{
1419 u8 entry_joined = pg_help_data->num_of_pg - 1;
1420 u8 entry_removed = entry_joined + 1;
1421 u8 pg_joined = 0;
1422
1423 if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data)
1424 <= pg_help_data->num_of_pg) {
1425
1426		BNX2X_ERR("required_num_of_pg can't be zero and num_of_pg must not exceed the table size\n");
1427 return -EINVAL;
1428 }
1429
1430 while (required_num_of_pg < pg_help_data->num_of_pg) {
1431 entry_joined = pg_help_data->num_of_pg - 2;
1432 entry_removed = entry_joined + 1;
1433 /* protect index */
1434 entry_removed %= ARRAY_SIZE(pg_help_data->data);
1435
1436 pg_help_data->data[entry_joined].pg_priority |=
1437 pg_help_data->data[entry_removed].pg_priority;
1438
1439 pg_help_data->data[entry_joined].num_of_dif_pri +=
1440 pg_help_data->data[entry_removed].num_of_dif_pri;
1441
1442 if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG ||
1443 pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG)
1444 /* Entries joined strict priority rules */
1445 pg_help_data->data[entry_joined].pg =
1446 DCBX_STRICT_PRI_PG;
1447 else {
1448		/* Entries can be joined - sum their BW */
1449 pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl,
1450 pg_help_data->data[entry_joined].pg) +
1451 DCBX_PG_BW_GET(ets->pg_bw_tbl,
1452 pg_help_data->data[entry_removed].pg);
1453
1454 DCBX_PG_BW_SET(ets->pg_bw_tbl,
1455 pg_help_data->data[entry_joined].pg, pg_joined);
1456 }
1457 /* Joined the entries */
1458 pg_help_data->num_of_pg--;
1459 }
1460
1461 return 0;
1462}
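/* Worked example (illustrative): reducing four BW-limited PGs with
 * bandwidths {40, 30, 20, 10} to three merges the last two entries:
 * their priority masks and num_of_dif_pri are combined and the joined
 * PG's bandwidth becomes 20 + 10 = 30. Had either member been
 * DCBX_STRICT_PRI_PG, the joined entry would be strict instead.
 */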
1463
1464static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
1465 struct bnx2x *bp,
1466 struct pg_help_data *pg_help_data,
1467 struct dcbx_ets_feature *ets,
1468 struct cos_help_data *cos_data,
1469 u32 *pg_pri_orginal_spread,
1470 u32 pri_join_mask,
1471 u8 num_of_dif_pri)
1472{
1473 u8 i = 0;
1474 u32 pri_tested = 0;
1475 u8 entry = 0;
1476 u8 pg_entry = 0;
1477 bool b_found_strict = false;
1478 u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
1479
1480 cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
1481 /* If there are both pauseable and non-pauseable priorities,
1482 * the pauseable priorities go to the first queue and the
1483 * non-pauseable priorities go to the second queue.
1484 */
1485 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask))
1486 bnx2x_dcbx_separate_pauseable_from_non(bp,
1487 cos_data, pg_pri_orginal_spread, ets);
1488 else {
1489 /* If two BW-limited PG-s were combined to one queue,
1490 * the BW is their sum.
1491 *
1492 * If there are only pauseable priorities or only non-pauseable,
1493 * and there are both BW-limited and non-BW-limited PG-s,
1494 * the BW-limited PG/s go to one queue and the non-BW-limited
1495 * PG/s go to the second queue.
1496 *
1497 * If there are only pauseable priorities or only non-pauseable
1498 * and all are BW limited, then two priorities go to the first
1499 * queue and one priority goes to the second queue.
1500 *
1501		 * We will join these two cases:
1502		 * if one PG is not BW limited it will go to the second queue;
1503		 * otherwise the last priority will get it.
1504 */
1505
1506 cos_data->data[0].pausable = cos_data->data[1].pausable =
1507 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
1508
1509 for (i = 0 ; i < num_of_pri; i++) {
1510 pri_tested = 1 << bp->dcbx_port_params.
1511 app.traffic_type_priority[i];
1512 pg_entry = (u8)pg_pri_orginal_spread[bp->
1513 dcbx_port_params.app.traffic_type_priority[i]];
1514
1515 if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
1516 entry = 0;
1517
1518 if (i == (num_of_pri-1) &&
1519 false == b_found_strict)
1520					/* last entry will be handled separately -
1521					 * if no priority is strict then the last
1522					 * entry goes to the last queue. */
1523 entry = 1;
1524 cos_data->data[entry].pri_join_mask |=
1525 pri_tested;
1526 bnx2x_dcbx_add_to_cos_bw(bp,
1527 &cos_data->data[entry],
1528 DCBX_PG_BW_GET(ets->pg_bw_tbl,
1529 pg_entry));
1530 } else {
1531 b_found_strict = true;
1532 cos_data->data[1].pri_join_mask |= pri_tested;
1533				/* If we join a group and one member is strict
1534				 * then strict overrides the bw */
1535 cos_data->data[1].strict =
1536 BNX2X_DCBX_STRICT_COS_HIGHEST;
1537 }
1538 }
1539 }
1540}
1541
1542
1543static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
1544 struct pg_help_data *help_data,
1545 struct dcbx_ets_feature *ets,
1546 struct cos_help_data *cos_data,
1547 u32 *pg_pri_orginal_spread,
1548 u32 pri_join_mask,
1549 u8 num_of_dif_pri)
1550{
1551
1552 /* default E2 settings */
1553 cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
1554
1555 switch (help_data->num_of_pg) {
1556 case 1:
1557 bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(
1558 bp,
1559 help_data,
1560 cos_data,
1561 pri_join_mask,
1562 num_of_dif_pri);
1563 break;
1564 case 2:
1565 bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
1566 bp,
1567 help_data,
1568 ets,
1569 cos_data,
1570 pg_pri_orginal_spread,
1571 pri_join_mask,
1572 num_of_dif_pri);
1573 break;
1574
1575 case 3:
1576 bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
1577 bp,
1578 help_data,
1579 ets,
1580 cos_data,
1581 pg_pri_orginal_spread,
1582 pri_join_mask,
1583 num_of_dif_pri);
1584 break;
1585 default:
1586 BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
1587 bnx2x_dcbx_ets_disabled_entry_data(bp,
1588 cos_data, pri_join_mask);
1589 }
1590}
1591
1592static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
1593 struct cos_help_data *cos_data,
1594 u8 entry,
1595 u8 num_spread_of_entries,
1596 u8 strict_app_pris)
1597{
1598 u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST;
1599 u8 num_of_app_pri = MAX_PFC_PRIORITIES;
1600 u8 app_pri_bit = 0;
1601
1602 while (num_spread_of_entries && num_of_app_pri > 0) {
1603 app_pri_bit = 1 << (num_of_app_pri - 1);
1604 if (app_pri_bit & strict_app_pris) {
1605 struct cos_entry_help_data *data = &cos_data->
1606 data[entry];
1607 num_spread_of_entries--;
1608 if (num_spread_of_entries == 0) {
1609 /* last entry needed put all the entries left */
1610 data->cos_bw = DCBX_INVALID_COS_BW;
1611 data->strict = strict_pri;
1612 data->pri_join_mask = strict_app_pris;
1613 data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
1614 data->pri_join_mask);
1615 } else {
1616 strict_app_pris &= ~app_pri_bit;
1617
1618 data->cos_bw = DCBX_INVALID_COS_BW;
1619 data->strict = strict_pri;
1620 data->pri_join_mask = app_pri_bit;
1621 data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
1622 data->pri_join_mask);
1623 }
1624
1625 strict_pri =
1626 BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri);
1627 entry++;
1628 }
1629
1630 num_of_app_pri--;
1631 }
1632
1633 if (num_spread_of_entries)
1634 return -EINVAL;
1635
1636 return 0;
1637}
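/* Worked example (illustrative): strict_app_pris = 0xc0 (priorities 6
 * and 7) spread over num_spread_of_entries = 2 gives the first entry
 * priority 7 alone at BNX2X_DCBX_STRICT_COS_HIGHEST and the second
 * entry the remaining mask 0x40 at the next lower strict level; the
 * last entry always absorbs whatever priorities are left.
 */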
1638
1639static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
1640 struct cos_help_data *cos_data,
1641 u8 entry,
1642 u8 num_spread_of_entries,
1643 u8 strict_app_pris)
1644{
1645
1646 if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
1647 num_spread_of_entries,
1648 strict_app_pris)) {
1649 struct cos_entry_help_data *data = &cos_data->
1650 data[entry];
1651 /* Fill BW entry */
1652 data->cos_bw = DCBX_INVALID_COS_BW;
1653 data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST;
1654 data->pri_join_mask = strict_app_pris;
1655 data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
1656 data->pri_join_mask);
1657 return 1;
1658 }
1659
1660 return num_spread_of_entries;
1661}
1662
1663static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp,
1664 struct pg_help_data *help_data,
1665 struct dcbx_ets_feature *ets,
1666 struct cos_help_data *cos_data,
1667 u32 pri_join_mask)
1668
1669{
1670 u8 need_num_of_entries = 0;
1671 u8 i = 0;
1672 u8 entry = 0;
1673
1674 /*
1675 * if the number of requested PG-s in CEE is greater than 3
1676 * then the results are not determined since this is a violation
1677 * of the standard.
1678 */
1679 if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) {
1680 if (bnx2x_dcbx_join_pgs(bp, ets, help_data,
1681 DCBX_COS_MAX_NUM_E3B0)) {
1682			BNX2X_ERR("Unable to reduce the number of PGs - "
1683				"disabling ETS\n");
1684 bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data,
1685 pri_join_mask);
1686 return;
1687 }
1688 }
1689
1690 for (i = 0 ; i < help_data->num_of_pg; i++) {
1691 struct pg_entry_help_data *pg = &help_data->data[i];
1692 if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
1693 struct cos_entry_help_data *data = &cos_data->
1694 data[entry];
1695 /* Fill BW entry */
1696 data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg);
1697 data->strict = BNX2X_DCBX_STRICT_INVALID;
1698 data->pri_join_mask = pg->pg_priority;
1699 data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
1700 data->pri_join_mask);
1701
1702 entry++;
1703 } else {
1704 need_num_of_entries = min_t(u8,
1705 (u8)pg->num_of_dif_pri,
1706 (u8)DCBX_COS_MAX_NUM_E3B0 -
1707 help_data->num_of_pg + 1);
1708 /*
1709 * If there are still VOQ-s which have no associated PG,
1710 * then associate these VOQ-s to PG15. These PG-s will
1711 * be used for SP between priorities on PG15.
1712 */
1713 entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data,
1714 entry, need_num_of_entries, pg->pg_priority);
1715 }
1716 }
1717
1718 /* the entry will represent the number of COSes used */
1719 cos_data->num_of_cos = entry;
1720}
1721static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
1722 struct pg_help_data *help_data,
1723 struct dcbx_ets_feature *ets,
1724 u32 *pg_pri_orginal_spread)
1725{
1726 struct cos_help_data cos_data;
1727 u8 i = 0;
1728 u32 pri_join_mask = 0;
1729 u8 num_of_dif_pri = 0;
1730
1731 memset(&cos_data, 0, sizeof(cos_data));
1732
1733 /* Validate the pg value */
1734 for (i = 0; i < help_data->num_of_pg ; i++) {
1735 if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
1736 DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
1737 BNX2X_ERR("Invalid pg[%d] data %x\n", i,
1738 help_data->data[i].pg);
1739 pri_join_mask |= help_data->data[i].pg_priority;
1740 num_of_dif_pri += help_data->data[i].num_of_dif_pri;
1741 }
1742
1743 /* defaults */
1744 cos_data.num_of_cos = 1;
1745 for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) {
1746 cos_data.data[i].pri_join_mask = 0;
1747 cos_data.data[i].pausable = false;
1748 cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID;
1749 cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
1750 }
1751
1752 if (CHIP_IS_E3B0(bp))
1753 bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets,
1754 &cos_data, pri_join_mask);
1755 else /* E2 + E3A0 */
1756 bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp,
1757 help_data, ets,
1758 &cos_data,
1759 pg_pri_orginal_spread,
1760 pri_join_mask,
1761 num_of_dif_pri);
1762
1763 for (i = 0; i < cos_data.num_of_cos ; i++) {
1764 struct bnx2x_dcbx_cos_params *p =
1765 &bp->dcbx_port_params.ets.cos_params[i];
1766
1767 p->strict = cos_data.data[i].strict;
1768 p->bw_tbl = cos_data.data[i].cos_bw;
1769 p->pri_bitmask = cos_data.data[i].pri_join_mask;
1770 p->pauseable = cos_data.data[i].pausable;
1771
1772 /* sanity */
1773 if (p->bw_tbl != DCBX_INVALID_COS_BW ||
1774 p->strict != BNX2X_DCBX_STRICT_INVALID) {
1775 if (p->pri_bitmask == 0)
1776 BNX2X_ERR("Invalid pri_bitmask for %d\n", i);
1777
1778 if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) {
1779
1780 if (p->pauseable &&
1781 DCBX_PFC_PRI_GET_NON_PAUSE(bp,
1782 p->pri_bitmask) != 0)
1783 BNX2X_ERR("Inconsistent config for "
1784 "pausable COS %d\n", i);
1785
1786 if (!p->pauseable &&
1787 DCBX_PFC_PRI_GET_PAUSE(bp,
1788 p->pri_bitmask) != 0)
1789 BNX2X_ERR("Inconsistent config for "
1790 "nonpausable COS %d\n", i);
1791 }
1792 }
1793
1794 if (p->pauseable)
1795 DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
1796 i, cos_data.data[i].pri_join_mask);
1797 else
1798 DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
1799 "0x%x\n",
1800 i, cos_data.data[i].pri_join_mask);
1801 }
1802
1803	bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos;
1804}
1805
1806static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
1807 u32 *set_configuration_ets_pg,
1808 u32 *pri_pg_tbl)
1809{
1810 int i;
1811
1812 for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
1813 set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
1814
1815 DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n",
1816 i, set_configuration_ets_pg[i]);
1817 }
1818}
1819
1820static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
1821 struct bnx2x_func_tx_start_params *pfc_fw_cfg)
1822{
1823 u16 pri_bit = 0;
1824 u8 cos = 0, pri = 0;
1825 struct priority_cos *tt2cos;
1826 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
1827
1828 memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg));
1829
1830 /* to disable DCB - the structure must be zeroed */
1831 if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)
1832 return;
1833
1834 /*shortcut*/
1835 tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;
1836
1837	/* FW version should be incremented on each update */
1838 pfc_fw_cfg->dcb_version = ++bp->dcb_version;
1839 pfc_fw_cfg->dcb_enabled = 1;
1840
1841 /* Fill priority parameters */
1842 for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
1843 tt2cos[pri].priority = ttp[pri];
1844 pri_bit = 1 << tt2cos[pri].priority;
1845
1846		/* Fill COS parameters based on the calculated COS to
1847		 * keep this general for future use */
1848 for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
1849 if (bp->dcbx_port_params.ets.cos_params[cos].
1850 pri_bitmask & pri_bit)
1851 tt2cos[pri].cos = cos;
1852 }
1853
1854 /* we never want the FW to add a 0 vlan tag */
1855 pfc_fw_cfg->dont_add_pri_0_en = 1;
1856
1857 bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
1858}
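/* Worked example (illustrative): if the app table maps the FCoE traffic
 * type to priority 3 and COS 1 owns pri_bitmask 0x08, the FW receives
 * tt2cos[FCOE] = { .priority = 3, .cos = 1 }; dcb_version is bumped on
 * every update so the FW can detect stale configurations.
 */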
1859
1860void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
1861{
1862	/* if we need to synchronize the DCBX result from the previous PMF
1863 * read it from shmem and update bp accordingly
1864 */
1865 if (SHMEM2_HAS(bp, drv_flags) &&
1866 GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
1867 /* Read neg results if dcbx is in the FW */
1868 if (bnx2x_dcbx_read_shmem_neg_results(bp))
1869 return;
1870
1871 bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
1872 bp->dcbx_error);
1873 bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
1874 bp->dcbx_error);
1875 }
1876}
1877
1878/* DCB netlink */
1879#ifdef BCM_DCBNL
1880
1881#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \
1882 DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
1883
1884static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp)
1885{
1886 /* validate dcbnl call that may change HW state:
1887 * DCB is on and DCBX mode was SUCCESSFULLY set by the user.
1888 */
1889 return bp->dcb_state && bp->dcbx_mode_uset;
1890}
1891
1892static u8 bnx2x_dcbnl_get_state(struct net_device *netdev)
1893{
1894 struct bnx2x *bp = netdev_priv(netdev);
1895 DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state);
1896 return bp->dcb_state;
1897}
1898
1899static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
1900{
1901 struct bnx2x *bp = netdev_priv(netdev);
1902 DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
1903
1904 bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
1905 return 0;
1906}
1907
1908static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
1909 u8 *perm_addr)
1910{
1911 struct bnx2x *bp = netdev_priv(netdev);
1912 DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n");
1913
1914 /* first the HW mac address */
1915 memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
1916
1917#ifdef BCM_CNIC
1918 /* second SAN address */
1919 memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len);
1920#endif
1921}
1922
1923static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
1924 u8 prio_type, u8 pgid, u8 bw_pct,
1925 u8 up_map)
1926{
1927 struct bnx2x *bp = netdev_priv(netdev);
1928
1929 DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid);
1930 if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
1931 return;
1932
1933	/*
1934	 * bw_pct ignored - bandwidth percentage division between user
1935	 *                  priorities within the same group is not
1936	 *                  standard and hence not supported
1937	 *
1938	 * prio_type ignored - priority levels within the same group are not
1939	 *                     standard and hence are not supported. According
1940	 *                     to the standard pgid 15 is dedicated to strict
1941	 *                     priority traffic (on the port level).
1942	 *
1943	 * up_map ignored
1944	 */
1945
1946 bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid;
1947 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
1948}
1949
1950static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev,
1951 int pgid, u8 bw_pct)
1952{
1953 struct bnx2x *bp = netdev_priv(netdev);
1954 DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct);
1955
1956 if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
1957 return;
1958
1959 bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct;
1960 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
1961}
1962
1963static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio,
1964 u8 prio_type, u8 pgid, u8 bw_pct,
1965 u8 up_map)
1966{
1967 struct bnx2x *bp = netdev_priv(netdev);
1968 DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
1969}
1970
1971static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev,
1972 int pgid, u8 bw_pct)
1973{
1974 struct bnx2x *bp = netdev_priv(netdev);
1975 DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
1976}
1977
1978static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
1979 u8 *prio_type, u8 *pgid, u8 *bw_pct,
1980 u8 *up_map)
1981{
1982 struct bnx2x *bp = netdev_priv(netdev);
1983 DP(NETIF_MSG_LINK, "prio = %d\n", prio);
1984
1985	/*
1986	 * bw_pct ignored - bandwidth percentage division between user
1987	 *                  priorities within the same group is not
1988	 *                  standard and hence not supported
1989	 *
1990	 * prio_type ignored - priority levels within the same group are not
1991	 *                     standard and hence are not supported. According
1992	 *                     to the standard pgid 15 is dedicated to strict
1993	 *                     priority traffic (on the port level).
1994	 *
1995	 * up_map ignored
1996	 */
1997 *up_map = *bw_pct = *prio_type = *pgid = 0;
1998
1999 if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
2000 return;
2001
2002 *pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio);
2003}
2004
2005static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
2006 int pgid, u8 *bw_pct)
2007{
2008 struct bnx2x *bp = netdev_priv(netdev);
2009 DP(NETIF_MSG_LINK, "pgid = %d\n", pgid);
2010
2011 *bw_pct = 0;
2012
2013 if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
2014 return;
2015
2016 *bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid);
2017}
2018
2019static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
2020 u8 *prio_type, u8 *pgid, u8 *bw_pct,
2021 u8 *up_map)
2022{
2023 struct bnx2x *bp = netdev_priv(netdev);
2024 DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
2025
2026 *prio_type = *pgid = *bw_pct = *up_map = 0;
2027}
2028
2029static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
2030 int pgid, u8 *bw_pct)
2031{
2032 struct bnx2x *bp = netdev_priv(netdev);
2033 DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
2034
2035 *bw_pct = 0;
2036}
2037
2038static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
2039 u8 setting)
2040{
2041 struct bnx2x *bp = netdev_priv(netdev);
2042 DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting);
2043
2044 if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
2045 return;
2046
2047 bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio);
2048
2049 if (setting)
2050 bp->dcbx_config_params.admin_pfc_tx_enable = 1;
2051}
2052
2053static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
2054 u8 *setting)
2055{
2056 struct bnx2x *bp = netdev_priv(netdev);
2057 DP(NETIF_MSG_LINK, "prio = %d\n", prio);
2058
2059 *setting = 0;
2060
2061 if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES)
2062 return;
2063
2064 *setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1;
2065}
2066
2067static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
2068{
2069 struct bnx2x *bp = netdev_priv(netdev);
2070 int rc = 0;
2071
2072 DP(NETIF_MSG_LINK, "SET-ALL\n");
2073
2074 if (!bnx2x_dcbnl_set_valid(bp))
2075 return 1;
2076
2077 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2078 netdev_err(bp->dev, "Handling parity error recovery. "
2079 "Try again later\n");
2080 return 1;
2081 }
2082 if (netif_running(bp->dev)) {
2083 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2084 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2085 }
2086 DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc);
2087 if (rc)
2088 return 1;
2089
2090 return 0;
2091}
2092
2093static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
2094{
2095 struct bnx2x *bp = netdev_priv(netdev);
2096 u8 rval = 0;
2097
2098 if (bp->dcb_state) {
2099 switch (capid) {
2100 case DCB_CAP_ATTR_PG:
2101 *cap = true;
2102 break;
2103 case DCB_CAP_ATTR_PFC:
2104 *cap = true;
2105 break;
2106 case DCB_CAP_ATTR_UP2TC:
2107 *cap = false;
2108 break;
2109 case DCB_CAP_ATTR_PG_TCS:
2110 *cap = 0x80; /* 8 priorities for PGs */
2111 break;
2112 case DCB_CAP_ATTR_PFC_TCS:
2113 *cap = 0x80; /* 8 priorities for PFC */
2114 break;
2115 case DCB_CAP_ATTR_GSP:
2116 *cap = true;
2117 break;
2118 case DCB_CAP_ATTR_BCN:
2119 *cap = false;
2120 break;
2121 case DCB_CAP_ATTR_DCBX:
2122 *cap = BNX2X_DCBX_CAPS;
2123 break;
2124 default:
2125 rval = -EINVAL;
2126 break;
2127 }
2128 } else
2129 rval = -EINVAL;
2130
2131 DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap);
2132 return rval;
2133}
2134
2135static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
2136{
2137 struct bnx2x *bp = netdev_priv(netdev);
2138 u8 rval = 0;
2139
2140 DP(NETIF_MSG_LINK, "tcid %d\n", tcid);
2141
2142 if (bp->dcb_state) {
2143 switch (tcid) {
2144 case DCB_NUMTCS_ATTR_PG:
2145 *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
2146 DCBX_COS_MAX_NUM_E2;
2147 break;
2148 case DCB_NUMTCS_ATTR_PFC:
2149 *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
2150 DCBX_COS_MAX_NUM_E2;
2151 break;
2152 default:
2153 rval = -EINVAL;
2154 break;
2155 }
2156 } else
2157 rval = -EINVAL;
2158
2159 return rval;
2160}
2161
2162static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
2163{
2164 struct bnx2x *bp = netdev_priv(netdev);
2165 DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num);
2166 return -EINVAL;
2167}
2168
2169static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
2170{
2171 struct bnx2x *bp = netdev_priv(netdev);
2172 DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
2173
2174 if (!bp->dcb_state)
2175 return 0;
2176
2177 return bp->dcbx_local_feat.pfc.enabled;
2178}
2179
2180static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
2181{
2182 struct bnx2x *bp = netdev_priv(netdev);
2183 DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
2184
2185 if (!bnx2x_dcbnl_set_valid(bp))
2186 return;
2187
2188 bp->dcbx_config_params.admin_pfc_tx_enable =
2189 bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
2190}
2191
2192static void bnx2x_admin_app_set_ent(
2193 struct bnx2x_admin_priority_app_table *app_ent,
2194 u8 idtype, u16 idval, u8 up)
2195{
2196 app_ent->valid = 1;
2197
2198 switch (idtype) {
2199 case DCB_APP_IDTYPE_ETHTYPE:
2200 app_ent->traffic_type = TRAFFIC_TYPE_ETH;
2201 break;
2202 case DCB_APP_IDTYPE_PORTNUM:
2203 app_ent->traffic_type = TRAFFIC_TYPE_PORT;
2204 break;
2205 default:
2206 break; /* never gets here */
2207 }
2208 app_ent->app_id = idval;
2209 app_ent->priority = up;
2210}
2211
2212static bool bnx2x_admin_app_is_equal(
2213 struct bnx2x_admin_priority_app_table *app_ent,
2214 u8 idtype, u16 idval)
2215{
2216 if (!app_ent->valid)
2217 return false;
2218
2219 switch (idtype) {
2220 case DCB_APP_IDTYPE_ETHTYPE:
2221 if (app_ent->traffic_type != TRAFFIC_TYPE_ETH)
2222 return false;
2223 break;
2224 case DCB_APP_IDTYPE_PORTNUM:
2225 if (app_ent->traffic_type != TRAFFIC_TYPE_PORT)
2226 return false;
2227 break;
2228 default:
2229 return false;
2230 }
2231 if (app_ent->app_id != idval)
2232 return false;
2233
2234 return true;
2235}
2236
2237static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
2238{
2239 int i, ff;
2240
2241 /* iterate over the app entries looking for idtype and idval */
2242 for (i = 0, ff = -1; i < 4; i++) {
2243 struct bnx2x_admin_priority_app_table *app_ent =
2244 &bp->dcbx_config_params.admin_priority_app_table[i];
2245 if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
2246 break;
2247
2248 if (ff < 0 && !app_ent->valid)
2249 ff = i;
2250 }
2251 if (i < 4)
2252 /* if found, overwrite the up value */
2253 bp->dcbx_config_params.
2254 admin_priority_app_table[i].priority = up;
2255 else if (ff >= 0)
2256 /* not found, use the first free entry */
2257 bnx2x_admin_app_set_ent(
2258 &bp->dcbx_config_params.admin_priority_app_table[ff],
2259 idtype, idval, up);
2260 else
2261 /* app table is full */
2262 return -EBUSY;
2263
2264 /* up is configured; if non-zero, make sure the feature is enabled */
2265 if (up)
2266 bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
2267
2268 return 0;
2269}
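/* Illustrative example (not from the original source): a request to map the
 * FCoE ethtype (0x8906) to priority 3 either overwrites an existing FCoE
 * entry or claims the first free one of the four admin slots; if all four
 * slots already hold other protocols, the call fails with -EBUSY.
 */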
2270
2271static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
2272 u16 idval, u8 up)
2273{
2274 struct bnx2x *bp = netdev_priv(netdev);
2275
2276 DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n",
2277 idtype, idval, up);
2278
2279 if (!bnx2x_dcbnl_set_valid(bp))
2280 return -EINVAL;
2281
2282 /* verify idtype */
2283 switch (idtype) {
2284 case DCB_APP_IDTYPE_ETHTYPE:
2285 case DCB_APP_IDTYPE_PORTNUM:
2286 break;
2287 default:
2288 return -EINVAL;
2289 }
2290 return bnx2x_set_admin_app_up(bp, idtype, idval, up);
2291}
2292
2293static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
2294{
2295 struct bnx2x *bp = netdev_priv(netdev);
2296 u8 state;
2297
2298 state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE;
2299
2300 if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF)
2301 state |= DCB_CAP_DCBX_STATIC;
2302
2303 return state;
2304}
2305
2306static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state)
2307{
2308 struct bnx2x *bp = netdev_priv(netdev);
2309 DP(NETIF_MSG_LINK, "state = %02x\n", state);
2310
2311 /* set dcbx mode */
2312
2313 if ((state & BNX2X_DCBX_CAPS) != state) {
2314 BNX2X_ERR("Requested DCBX mode %x is beyond advertised "
2315 "capabilities\n", state);
2316 return 1;
2317 }
2318
2319 if (bp->dcb_state != BNX2X_DCB_STATE_ON) {
2320 BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n");
2321 return 1;
2322 }
2323
2324 if (state & DCB_CAP_DCBX_STATIC)
2325 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF;
2326 else
2327 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON;
2328
2329 bp->dcbx_mode_uset = true;
2330 return 0;
2331}
2332
2333static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
2334 u8 *flags)
2335{
2336 struct bnx2x *bp = netdev_priv(netdev);
2337 u8 rval = 0;
2338
2339 DP(NETIF_MSG_LINK, "featid %d\n", featid);
2340
2341 if (bp->dcb_state) {
2342 *flags = 0;
2343 switch (featid) {
2344 case DCB_FEATCFG_ATTR_PG:
2345 if (bp->dcbx_local_feat.ets.enabled)
2346 *flags |= DCB_FEATCFG_ENABLE;
2347 if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
2348 *flags |= DCB_FEATCFG_ERROR;
2349 break;
2350 case DCB_FEATCFG_ATTR_PFC:
2351 if (bp->dcbx_local_feat.pfc.enabled)
2352 *flags |= DCB_FEATCFG_ENABLE;
2353 if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
2354 DCBX_LOCAL_PFC_MISMATCH))
2355 *flags |= DCB_FEATCFG_ERROR;
2356 break;
2357 case DCB_FEATCFG_ATTR_APP:
2358 if (bp->dcbx_local_feat.app.enabled)
2359 *flags |= DCB_FEATCFG_ENABLE;
2360 if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
2361 DCBX_LOCAL_APP_MISMATCH))
2362 *flags |= DCB_FEATCFG_ERROR;
2363 break;
2364 default:
2365 rval = -EINVAL;
2366 break;
2367 }
2368 } else
2369 rval = -EINVAL;
2370
2371 return rval;
2372}
2373
2374static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
2375 u8 flags)
2376{
2377 struct bnx2x *bp = netdev_priv(netdev);
2378 u8 rval = 0;
2379
2380 DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags);
2381
2382 /* ignore the 'advertise' flag */
2383 if (bnx2x_dcbnl_set_valid(bp)) {
2384 switch (featid) {
2385 case DCB_FEATCFG_ATTR_PG:
2386 bp->dcbx_config_params.admin_ets_enable =
2387 flags & DCB_FEATCFG_ENABLE ? 1 : 0;
2388 bp->dcbx_config_params.admin_ets_willing =
2389 flags & DCB_FEATCFG_WILLING ? 1 : 0;
2390 break;
2391 case DCB_FEATCFG_ATTR_PFC:
2392 bp->dcbx_config_params.admin_pfc_enable =
2393 flags & DCB_FEATCFG_ENABLE ? 1 : 0;
2394 bp->dcbx_config_params.admin_pfc_willing =
2395 flags & DCB_FEATCFG_WILLING ? 1 : 0;
2396 break;
2397 case DCB_FEATCFG_ATTR_APP:
2398 /* ignore the enable flag; the APP feature is always enabled */
2399 bp->dcbx_config_params.admin_app_priority_willing =
2400 flags & DCB_FEATCFG_WILLING ? 1 : 0;
2401 break;
2402 default:
2403 rval = -EINVAL;
2404 break;
2405 }
2406 } else
2407 rval = -EINVAL;
2408
2409 return rval;
2410}
2411
2412static int bnx2x_peer_appinfo(struct net_device *netdev,
2413 struct dcb_peer_app_info *info, u16* app_count)
2414{
2415 int i;
2416 struct bnx2x *bp = netdev_priv(netdev);
2417
2418 DP(NETIF_MSG_LINK, "APP-INFO\n");
2419
2420 info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0;
2421 info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0;
2422 *app_count = 0;
2423
2424 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
2425 if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield &
2426 DCBX_APP_ENTRY_VALID)
2427 (*app_count)++;
2428 return 0;
2429}
2430
2431static int bnx2x_peer_apptable(struct net_device *netdev,
2432 struct dcb_app *table)
2433{
2434 int i, j;
2435 struct bnx2x *bp = netdev_priv(netdev);
2436
2437 DP(NETIF_MSG_LINK, "APP-TABLE\n");
2438
2439 for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
2440 struct dcbx_app_priority_entry *ent =
2441 &bp->dcbx_remote_feat.app.app_pri_tbl[i];
2442
2443 if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
2444 table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
2445 table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent);
2446 table[j++].protocol = ent->app_id;
2447 }
2448 }
2449 return 0;
2450}
2451
2452static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg)
2453{
2454 int i;
2455 struct bnx2x *bp = netdev_priv(netdev);
2456
2457 pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0;
2458
2459 for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
2460 pg->pg_bw[i] =
2461 DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i);
2462 pg->prio_pg[i] =
2463 DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i);
2464 }
2465 return 0;
2466}
2467
2468static int bnx2x_cee_peer_getpfc(struct net_device *netdev,
2469 struct cee_pfc *pfc)
2470{
2471 struct bnx2x *bp = netdev_priv(netdev);
2472 pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps;
2473 pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap;
2474 return 0;
2475}
2476
2477const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
2478 .getstate = bnx2x_dcbnl_get_state,
2479 .setstate = bnx2x_dcbnl_set_state,
2480 .getpermhwaddr = bnx2x_dcbnl_get_perm_hw_addr,
2481 .setpgtccfgtx = bnx2x_dcbnl_set_pg_tccfg_tx,
2482 .setpgbwgcfgtx = bnx2x_dcbnl_set_pg_bwgcfg_tx,
2483 .setpgtccfgrx = bnx2x_dcbnl_set_pg_tccfg_rx,
2484 .setpgbwgcfgrx = bnx2x_dcbnl_set_pg_bwgcfg_rx,
2485 .getpgtccfgtx = bnx2x_dcbnl_get_pg_tccfg_tx,
2486 .getpgbwgcfgtx = bnx2x_dcbnl_get_pg_bwgcfg_tx,
2487 .getpgtccfgrx = bnx2x_dcbnl_get_pg_tccfg_rx,
2488 .getpgbwgcfgrx = bnx2x_dcbnl_get_pg_bwgcfg_rx,
2489 .setpfccfg = bnx2x_dcbnl_set_pfc_cfg,
2490 .getpfccfg = bnx2x_dcbnl_get_pfc_cfg,
2491 .setall = bnx2x_dcbnl_set_all,
2492 .getcap = bnx2x_dcbnl_get_cap,
2493 .getnumtcs = bnx2x_dcbnl_get_numtcs,
2494 .setnumtcs = bnx2x_dcbnl_set_numtcs,
2495 .getpfcstate = bnx2x_dcbnl_get_pfc_state,
2496 .setpfcstate = bnx2x_dcbnl_set_pfc_state,
2497 .setapp = bnx2x_dcbnl_set_app_up,
2498 .getdcbx = bnx2x_dcbnl_get_dcbx,
2499 .setdcbx = bnx2x_dcbnl_set_dcbx,
2500 .getfeatcfg = bnx2x_dcbnl_get_featcfg,
2501 .setfeatcfg = bnx2x_dcbnl_set_featcfg,
2502 .peer_getappinfo = bnx2x_peer_appinfo,
2503 .peer_getapptable = bnx2x_peer_apptable,
2504 .cee_peer_getpg = bnx2x_cee_peer_getpg,
2505 .cee_peer_getpfc = bnx2x_cee_peer_getpfc,
2506};
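/* Note (not in the original source): this table is the driver's CEE dcbnl
 * entry point; the rest of the driver is expected to hook it into the
 * net_device (dcbnl_ops) at probe time, so that userspace agents such as
 * lldpad/dcbtool can stage settings (setpfccfg, setapp, ...) and commit
 * them with setall.
 */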
2507
2508#endif /* BCM_DCBNL */
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
new file mode 100644
index 00000000000..2c6a3bca6f2
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -0,0 +1,203 @@
1/* bnx2x_dcb.h: Broadcom Everest network driver.
2 *
3 * Copyright 2009-2011 Broadcom Corporation
4 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Dmitry Kravkov
17 *
18 */
19#ifndef BNX2X_DCB_H
20#define BNX2X_DCB_H
21
22#include "bnx2x_hsi.h"
23
24#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */
25struct bnx2x_dcbx_app_params {
26 u32 enabled;
27 u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
28};
29
30#define DCBX_COS_MAX_NUM_E2 DCBX_E2E3_MAX_NUM_COS
31/* bnx2x currently limits the number of supported COSes to 3; to be extended to 6 */
32#define BNX2X_MAX_COS_SUPPORT 3
33#define DCBX_COS_MAX_NUM_E3B0 BNX2X_MAX_COS_SUPPORT
34#define DCBX_COS_MAX_NUM BNX2X_MAX_COS_SUPPORT
35
36struct bnx2x_dcbx_cos_params {
37 u32 bw_tbl;
38 u32 pri_bitmask;
39 /*
40 * strict priority: valid values are 0..5; 0 is highest priority.
41 * There can't be two COSes with the same priority.
42 */
43 u8 strict;
44#define BNX2X_DCBX_STRICT_INVALID DCBX_COS_MAX_NUM
45#define BNX2X_DCBX_STRICT_COS_HIGHEST 0
46#define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp) ((sp) + 1)
47 u8 pauseable;
48};
49
50struct bnx2x_dcbx_pg_params {
51 u32 enabled;
52 u8 num_of_cos; /* valid COS entries */
53 struct bnx2x_dcbx_cos_params cos_params[DCBX_COS_MAX_NUM];
54};
55
56struct bnx2x_dcbx_pfc_params {
57 u32 enabled;
58 u32 priority_non_pauseable_mask;
59};
60
61struct bnx2x_dcbx_port_params {
62 struct bnx2x_dcbx_pfc_params pfc;
63 struct bnx2x_dcbx_pg_params ets;
64 struct bnx2x_dcbx_app_params app;
65};
66
67#define BNX2X_DCBX_CONFIG_INV_VALUE (0xFFFFFFFF)
68#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0
69#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1
70#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE)
71#define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\
72 (bp)->dcbx_port_params.ets.enabled)
73
74struct bnx2x_config_lldp_params {
75 u32 overwrite_settings;
76 u32 msg_tx_hold;
77 u32 msg_fast_tx;
78 u32 tx_credit_max;
79 u32 msg_tx_interval;
80 u32 tx_fast;
81};
82
83struct bnx2x_admin_priority_app_table {
84 u32 valid;
85 u32 priority;
86#define INVALID_TRAFFIC_TYPE_PRIORITY (0xFFFFFFFF)
87 u32 traffic_type;
88#define TRAFFIC_TYPE_ETH 0
89#define TRAFFIC_TYPE_PORT 1
90 u32 app_id;
91};
92
93struct bnx2x_config_dcbx_params {
94 u32 overwrite_settings;
95 u32 admin_dcbx_version;
96 u32 admin_ets_enable;
97 u32 admin_pfc_enable;
98 u32 admin_tc_supported_tx_enable;
99 u32 admin_ets_configuration_tx_enable;
100 u32 admin_ets_recommendation_tx_enable;
101 u32 admin_pfc_tx_enable;
102 u32 admin_application_priority_tx_enable;
103 u32 admin_ets_willing;
104 u32 admin_ets_reco_valid;
105 u32 admin_pfc_willing;
106 u32 admin_app_priority_willing;
107 u32 admin_configuration_bw_precentage[8];
108 u32 admin_configuration_ets_pg[8];
109 u32 admin_recommendation_bw_precentage[8];
110 u32 admin_recommendation_ets_pg[8];
111 u32 admin_pfc_bitmap;
112 struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
113 u32 admin_default_priority;
114};
115
116#define GET_FLAGS(flags, bits) ((flags) & (bits))
117#define SET_FLAGS(flags, bits) ((flags) |= (bits))
118#define RESET_FLAGS(flags, bits) ((flags) &= ~(bits))
119
120enum {
121 DCBX_READ_LOCAL_MIB,
122 DCBX_READ_REMOTE_MIB
123};
124
125#define ETH_TYPE_FCOE (0x8906)
126#define TCP_PORT_ISCSI (0xCBC)
127
128#define PFC_VALUE_FRAME_SIZE (512)
129#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed) \
130 ((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed))
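/* Worked example (not in the original source): at 10G, mega_speed = 10000,
 * so one 512-bit pause quantum (PFC_VALUE_FRAME_SIZE bit times) lasts
 * (1000 * 512) / 10000 = 51 ns.
 */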
131
132#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130
133#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170
134
135
136
137struct cos_entry_help_data {
138 u32 pri_join_mask;
139 u32 cos_bw;
140 u8 strict;
141 bool pausable;
142};
143
144struct cos_help_data {
145 struct cos_entry_help_data data[DCBX_COS_MAX_NUM];
146 u8 num_of_cos;
147};
148
149#define DCBX_ILLEGAL_PG (0xFF)
150#define DCBX_PFC_PRI_MASK (0xFF)
151#define DCBX_STRICT_PRIORITY (15)
152#define DCBX_INVALID_COS_BW (0xFFFFFFFF)
153#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp) \
154 ((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
155#define DCBX_PFC_PRI_PAUSE_MASK(bp) \
156 ((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
157#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri) \
158 ((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
159#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \
160 (DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
161#define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri) \
162 (0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri))
163#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \
164 (pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
165#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
166 ((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
167#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri) \
168 (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
169 IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
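/* Worked example (not in the original source): with
 * priority_non_pauseable_mask = 0x0f the pause mask is 0xf0, so a PG
 * priority bitmap of 0x30 is "only pause", 0x03 is "only non-pause" and
 * 0x33 is "mixed".
 */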
170
171
172struct pg_entry_help_data {
173 u8 num_of_dif_pri;
174 u8 pg;
175 u32 pg_priority;
176};
177
178struct pg_help_data {
179 struct pg_entry_help_data data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
180 u8 num_of_pg;
181};
182
183/* DCB/PFC related forward declarations */
184struct bnx2x;
185void bnx2x_dcbx_update(struct work_struct *work);
186void bnx2x_dcbx_init_params(struct bnx2x *bp);
187void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
188
189enum {
190 BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
191 BNX2X_DCBX_STATE_TX_PAUSED,
192 BNX2X_DCBX_STATE_TX_RELEASED
193};
194
195void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
196void bnx2x_dcbx_pmf_update(struct bnx2x *bp);
197/* DCB netlink */
198#ifdef BCM_DCBNL
199extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
200int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
201#endif /* BCM_DCBNL */
202
203#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
new file mode 100644
index 00000000000..b983825d0ee
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -0,0 +1,1156 @@
1/* bnx2x_dump.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2011 Broadcom Corporation
4 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 */
15
16
17/* This struct holds a signature to ensure that the dump returned from the
18 * driver matches the metadata file inserted into grc_dump.tcl.
19 * The signature is the time stamp, the diag version and the grc_dump version.
20 */
21
22#ifndef BNX2X_DUMP_H
23#define BNX2X_DUMP_H
24
25
26
27/* definitions */
28#define XSTORM_WAITP_ADDR 0x2b8a80
29#define TSTORM_WAITP_ADDR 0x1b8a80
30#define USTORM_WAITP_ADDR 0x338a80
31#define CSTORM_WAITP_ADDR 0x238a80
32#define TSTORM_CAM_MODE 0x1B1440
33
34#define MAX_TIMER_PENDING 200
35#define TIMER_SCAN_DONT_CARE 0xFF
36#define RI_E1 0x1
37#define RI_E1H 0x2
38#define RI_E2 0x4
39#define RI_E3 0x8
40#define RI_E3B0 0x10
41#define RI_ONLINE 0x100
42#define RI_OFFLINE 0x0
43#define RI_PATH0_DUMP 0x200
44#define RI_PATH1_DUMP 0x400
45
46#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
47#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
48#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
49#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
50#define RI_E1E2_ONLINE (RI_E1 | RI_E2 | RI_ONLINE)
51#define RI_E1HE2_ONLINE (RI_E1H | RI_E2 | RI_ONLINE)
52#define RI_E1E1HE2_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
53#define RI_E3_ONLINE (RI_E3 | RI_ONLINE)
54#define RI_E1E3_ONLINE (RI_E1 | RI_E3 | RI_ONLINE)
55#define RI_E1HE3_ONLINE (RI_E1H | RI_E3 | RI_ONLINE)
56#define RI_E1E1HE3_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_ONLINE)
57#define RI_E2E3_ONLINE (RI_E2 | RI_E3 | RI_ONLINE)
58#define RI_E1E2E3_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_ONLINE)
59#define RI_E1HE2E3_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
60#define RI_E1E1HE2E3_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
61#define RI_E3B0_ONLINE (RI_E3B0 | RI_ONLINE)
62#define RI_E1E3B0_ONLINE (RI_E1 | RI_E3B0 | RI_ONLINE)
63#define RI_E1HE3B0_ONLINE (RI_E1H | RI_E3B0 | RI_ONLINE)
64#define RI_E1E1HE3B0_ONLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_ONLINE)
65#define RI_E2E3B0_ONLINE (RI_E2 | RI_E3B0 | RI_ONLINE)
66#define RI_E1E2E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_ONLINE)
67#define RI_E1HE2E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
68#define RI_E1E1HE2E3B0_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
69#define RI_E3E3B0_ONLINE (RI_E3 | RI_E3B0 | RI_ONLINE)
70#define RI_E1E3E3B0_ONLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_ONLINE)
71#define RI_E1HE3E3B0_ONLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
72#define RI_E1E1HE3E3B0_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
73#define RI_E2E3E3B0_ONLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
74#define RI_E1E2E3E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
75#define RI_E1HE2E3E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
76#define RI_E1E1HE2E3E3B0_ONLINE \
77 (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
78#define RI_E1_OFFLINE (RI_E1 | RI_OFFLINE)
79#define RI_E1H_OFFLINE (RI_E1H | RI_OFFLINE)
80#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H | RI_OFFLINE)
81#define RI_E2_OFFLINE (RI_E2 | RI_OFFLINE)
82#define RI_E1E2_OFFLINE (RI_E1 | RI_E2 | RI_OFFLINE)
83#define RI_E1HE2_OFFLINE (RI_E1H | RI_E2 | RI_OFFLINE)
84#define RI_E1E1HE2_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_OFFLINE)
85#define RI_E3_OFFLINE (RI_E3 | RI_OFFLINE)
86#define RI_E1E3_OFFLINE (RI_E1 | RI_E3 | RI_OFFLINE)
87#define RI_E1HE3_OFFLINE (RI_E1H | RI_E3 | RI_OFFLINE)
88#define RI_E1E1HE3_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_OFFLINE)
89#define RI_E2E3_OFFLINE (RI_E2 | RI_E3 | RI_OFFLINE)
90#define RI_E1E2E3_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_OFFLINE)
91#define RI_E1HE2E3_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
92#define RI_E1E1HE2E3_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
93#define RI_E3B0_OFFLINE (RI_E3B0 | RI_OFFLINE)
94#define RI_E1E3B0_OFFLINE (RI_E1 | RI_E3B0 | RI_OFFLINE)
95#define RI_E1HE3B0_OFFLINE (RI_E1H | RI_E3B0 | RI_OFFLINE)
96#define RI_E1E1HE3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_OFFLINE)
97#define RI_E2E3B0_OFFLINE (RI_E2 | RI_E3B0 | RI_OFFLINE)
98#define RI_E1E2E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_OFFLINE)
99#define RI_E1HE2E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
100#define RI_E1E1HE2E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
101#define RI_E3E3B0_OFFLINE (RI_E3 | RI_E3B0 | RI_OFFLINE)
102#define RI_E1E3E3B0_OFFLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_OFFLINE)
103#define RI_E1HE3E3B0_OFFLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
104#define RI_E1E1HE3E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
105#define RI_E2E3E3B0_OFFLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
106#define RI_E1E2E3E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
107#define RI_E1HE2E3E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
108#define RI_E1E1HE2E3E3B0_OFFLINE \
109 (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
110#define RI_ALL_ONLINE RI_E1E1HE2E3E3B0_ONLINE
111#define RI_ALL_OFFLINE RI_E1E1HE2E3E3B0_OFFLINE
112
113#define DBG_DMP_TRACE_BUFFER_SIZE 0x800
114#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \
115 ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE)
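/* i.e. the 0x800-byte trace buffer sits immediately below the shmem0 base */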
116
117struct dump_sign {
118 u32 time_stamp;
119 u32 diag_ver;
120 u32 grc_dump_ver;
121};
122
123struct dump_hdr {
124 u32 hdr_size; /* in dwords, excluding this field */
125 struct dump_sign dump_sign;
126 u32 xstorm_waitp;
127 u32 tstorm_waitp;
128 u32 ustorm_waitp;
129 u32 cstorm_waitp;
130 u16 info;
131 u8 idle_chk;
132 u8 reserved;
133};
134
135struct reg_addr {
136 u32 addr;
137 u32 size;
138 u16 info;
139};
140
141struct wreg_addr {
142 u32 addr;
143 u32 size;
144 u32 read_regs_count;
145 const u32 *read_regs;
146 u16 info;
147};
148
149static const struct reg_addr reg_addrs[] = {
150 { 0x2000, 341, RI_ALL_ONLINE },
151 { 0x2800, 103, RI_ALL_ONLINE },
152 { 0x3000, 287, RI_ALL_ONLINE },
153 { 0x3800, 331, RI_ALL_ONLINE },
154 { 0x8800, 6, RI_ALL_ONLINE },
155 { 0x8818, 1, RI_E1HE2E3E3B0_ONLINE },
156 { 0x9000, 147, RI_E2E3E3B0_ONLINE },
157 { 0x924c, 1, RI_E2_ONLINE },
158 { 0x9250, 16, RI_E2E3E3B0_ONLINE },
159 { 0x9400, 33, RI_E2E3E3B0_ONLINE },
160 { 0x9484, 5, RI_E3E3B0_ONLINE },
161 { 0xa000, 27, RI_ALL_ONLINE },
162 { 0xa06c, 1, RI_E1E1H_ONLINE },
163 { 0xa070, 71, RI_ALL_ONLINE },
164 { 0xa18c, 4, RI_E1E1H_ONLINE },
165 { 0xa19c, 62, RI_ALL_ONLINE },
166 { 0xa294, 2, RI_E1E1H_ONLINE },
167 { 0xa29c, 2, RI_ALL_ONLINE },
168 { 0xa2a4, 2, RI_E1E1HE2_ONLINE },
169 { 0xa2ac, 52, RI_ALL_ONLINE },
170 { 0xa39c, 7, RI_E1HE2E3E3B0_ONLINE },
171 { 0xa3b8, 2, RI_E3E3B0_ONLINE },
172 { 0xa3c0, 3, RI_E1HE2E3E3B0_ONLINE },
173 { 0xa3d0, 1, RI_E1HE2E3E3B0_ONLINE },
174 { 0xa3d8, 1, RI_E1HE2E3E3B0_ONLINE },
175 { 0xa3e0, 1, RI_E1HE2E3E3B0_ONLINE },
176 { 0xa3e8, 1, RI_E1HE2E3E3B0_ONLINE },
177 { 0xa3f0, 1, RI_E1HE2E3E3B0_ONLINE },
178 { 0xa3f8, 1, RI_E1HE2E3E3B0_ONLINE },
179 { 0xa400, 40, RI_ALL_ONLINE },
180 { 0xa4a0, 1, RI_E1E1HE2_ONLINE },
181 { 0xa4a4, 2, RI_ALL_ONLINE },
182 { 0xa4ac, 2, RI_E1E1H_ONLINE },
183 { 0xa4b4, 1, RI_E1E1HE2_ONLINE },
184 { 0xa4b8, 2, RI_E1E1H_ONLINE },
185 { 0xa4c0, 3, RI_ALL_ONLINE },
186 { 0xa4cc, 5, RI_E1E1H_ONLINE },
187 { 0xa4e0, 3, RI_ALL_ONLINE },
188 { 0xa4fc, 2, RI_ALL_ONLINE },
189 { 0xa504, 1, RI_E1E1H_ONLINE },
190 { 0xa508, 3, RI_ALL_ONLINE },
191 { 0xa518, 1, RI_ALL_ONLINE },
192 { 0xa520, 1, RI_ALL_ONLINE },
193 { 0xa528, 1, RI_ALL_ONLINE },
194 { 0xa530, 1, RI_ALL_ONLINE },
195 { 0xa538, 1, RI_ALL_ONLINE },
196 { 0xa540, 1, RI_ALL_ONLINE },
197 { 0xa548, 1, RI_E1E1H_ONLINE },
198 { 0xa550, 1, RI_E1E1H_ONLINE },
199 { 0xa558, 1, RI_E1E1H_ONLINE },
200 { 0xa560, 1, RI_E1E1H_ONLINE },
201 { 0xa568, 1, RI_E1E1H_ONLINE },
202 { 0xa570, 1, RI_ALL_ONLINE },
203 { 0xa580, 1, RI_ALL_ONLINE },
204 { 0xa590, 1, RI_ALL_ONLINE },
205 { 0xa5a0, 1, RI_E1E1HE2_ONLINE },
206 { 0xa5c0, 1, RI_ALL_ONLINE },
207 { 0xa5e0, 1, RI_E1HE2E3E3B0_ONLINE },
208 { 0xa5e8, 1, RI_E1HE2E3E3B0_ONLINE },
209 { 0xa5f0, 1, RI_E1HE2E3E3B0_ONLINE },
210 { 0xa5f8, 1, RI_E1HE2_ONLINE },
211 { 0xa5fc, 9, RI_E1HE2E3E3B0_ONLINE },
212 { 0xa620, 6, RI_E2E3E3B0_ONLINE },
213 { 0xa638, 20, RI_E2_ONLINE },
214 { 0xa688, 42, RI_E2E3E3B0_ONLINE },
215 { 0xa730, 1, RI_E2_ONLINE },
216 { 0xa734, 2, RI_E2E3E3B0_ONLINE },
217 { 0xa73c, 4, RI_E2_ONLINE },
218 { 0xa74c, 5, RI_E2E3E3B0_ONLINE },
219 { 0xa760, 5, RI_E2_ONLINE },
220 { 0xa774, 7, RI_E2E3E3B0_ONLINE },
221 { 0xa790, 15, RI_E2_ONLINE },
222 { 0xa7cc, 4, RI_E2E3E3B0_ONLINE },
223 { 0xa7e0, 6, RI_E3E3B0_ONLINE },
224 { 0xa800, 18, RI_E2_ONLINE },
225 { 0xa848, 33, RI_E2E3E3B0_ONLINE },
226 { 0xa8cc, 2, RI_E3E3B0_ONLINE },
227 { 0xa8d4, 4, RI_E2E3E3B0_ONLINE },
228 { 0xa8e4, 1, RI_E3E3B0_ONLINE },
229 { 0xa8e8, 1, RI_E2E3E3B0_ONLINE },
230 { 0xa8f0, 1, RI_E2E3E3B0_ONLINE },
231 { 0xa8f8, 30, RI_E3E3B0_ONLINE },
232 { 0xa974, 73, RI_E3E3B0_ONLINE },
233 { 0xac30, 1, RI_E3E3B0_ONLINE },
234 { 0xac40, 1, RI_E3E3B0_ONLINE },
235 { 0xac50, 1, RI_E3E3B0_ONLINE },
236 { 0xac60, 1, RI_E3B0_ONLINE },
237 { 0x10000, 9, RI_ALL_ONLINE },
238 { 0x10024, 1, RI_E1E1HE2_ONLINE },
239 { 0x10028, 5, RI_ALL_ONLINE },
240 { 0x1003c, 6, RI_E1E1HE2_ONLINE },
241 { 0x10054, 20, RI_ALL_ONLINE },
242 { 0x100a4, 4, RI_E1E1HE2_ONLINE },
243 { 0x100b4, 11, RI_ALL_ONLINE },
244 { 0x100e0, 4, RI_E1E1HE2_ONLINE },
245 { 0x100f0, 8, RI_ALL_ONLINE },
246 { 0x10110, 6, RI_E1E1HE2_ONLINE },
247 { 0x10128, 110, RI_ALL_ONLINE },
248 { 0x102e0, 4, RI_E1E1HE2_ONLINE },
249 { 0x102f0, 18, RI_ALL_ONLINE },
250 { 0x10338, 20, RI_E1E1HE2_ONLINE },
251 { 0x10388, 10, RI_ALL_ONLINE },
252 { 0x10400, 6, RI_E1E1HE2_ONLINE },
253 { 0x10418, 6, RI_ALL_ONLINE },
254 { 0x10430, 10, RI_E1E1HE2_ONLINE },
255 { 0x10458, 22, RI_ALL_ONLINE },
256 { 0x104b0, 12, RI_E1E1HE2_ONLINE },
257 { 0x104e0, 1, RI_ALL_ONLINE },
258 { 0x104e8, 2, RI_ALL_ONLINE },
259 { 0x104f4, 2, RI_ALL_ONLINE },
260 { 0x10500, 146, RI_ALL_ONLINE },
261 { 0x10750, 2, RI_E1E1HE2_ONLINE },
262 { 0x10760, 2, RI_E1E1HE2_ONLINE },
263 { 0x10770, 2, RI_E1E1HE2_ONLINE },
264 { 0x10780, 2, RI_E1E1HE2_ONLINE },
265 { 0x10790, 2, RI_ALL_ONLINE },
266 { 0x107a0, 2, RI_E1E1HE2_ONLINE },
267 { 0x107b0, 2, RI_E1E1HE2_ONLINE },
268 { 0x107c0, 2, RI_E1E1HE2_ONLINE },
269 { 0x107d0, 2, RI_E1E1HE2_ONLINE },
270 { 0x107e0, 2, RI_ALL_ONLINE },
271 { 0x10880, 2, RI_ALL_ONLINE },
272 { 0x10900, 2, RI_ALL_ONLINE },
273 { 0x16000, 1, RI_E1HE2_ONLINE },
274 { 0x16004, 25, RI_E1HE2E3E3B0_ONLINE },
275 { 0x16070, 8, RI_E1HE2E3E3B0_ONLINE },
276 { 0x16090, 4, RI_E1HE2E3_ONLINE },
277 { 0x160a0, 6, RI_E1HE2E3E3B0_ONLINE },
278 { 0x160c0, 7, RI_E1HE2E3E3B0_ONLINE },
279 { 0x160dc, 2, RI_E1HE2_ONLINE },
280 { 0x160e4, 10, RI_E1HE2E3E3B0_ONLINE },
281 { 0x1610c, 2, RI_E1HE2_ONLINE },
282 { 0x16114, 6, RI_E1HE2E3E3B0_ONLINE },
283 { 0x16140, 48, RI_E1HE2E3E3B0_ONLINE },
284 { 0x16204, 5, RI_E1HE2E3E3B0_ONLINE },
285 { 0x18000, 1, RI_E1HE2E3E3B0_ONLINE },
286 { 0x18008, 1, RI_E1HE2E3E3B0_ONLINE },
287 { 0x18010, 35, RI_E2E3E3B0_ONLINE },
288 { 0x180a4, 2, RI_E2E3E3B0_ONLINE },
289 { 0x180c0, 9, RI_E2E3E3B0_ONLINE },
290 { 0x180e4, 1, RI_E2E3_ONLINE },
291 { 0x180e8, 2, RI_E2E3E3B0_ONLINE },
292 { 0x180f0, 1, RI_E2E3_ONLINE },
293 { 0x180f4, 79, RI_E2E3E3B0_ONLINE },
294 { 0x18230, 1, RI_E2E3_ONLINE },
295 { 0x18234, 2, RI_E2E3E3B0_ONLINE },
296 { 0x1823c, 1, RI_E2E3_ONLINE },
297 { 0x18240, 13, RI_E2E3E3B0_ONLINE },
298 { 0x18274, 1, RI_E2_ONLINE },
299 { 0x18278, 81, RI_E2E3E3B0_ONLINE },
300 { 0x18440, 63, RI_E2E3E3B0_ONLINE },
301 { 0x18570, 42, RI_E3E3B0_ONLINE },
302 { 0x18618, 25, RI_E3B0_ONLINE },
303 { 0x18680, 44, RI_E3B0_ONLINE },
304 { 0x18748, 12, RI_E3B0_ONLINE },
305 { 0x18788, 1, RI_E3B0_ONLINE },
306 { 0x1879c, 6, RI_E3B0_ONLINE },
307 { 0x187c4, 51, RI_E3B0_ONLINE },
308 { 0x18a00, 48, RI_E3B0_ONLINE },
309 { 0x20000, 24, RI_ALL_ONLINE },
310 { 0x20060, 8, RI_ALL_ONLINE },
311 { 0x20080, 94, RI_ALL_ONLINE },
312 { 0x201f8, 1, RI_E1E1H_ONLINE },
313 { 0x201fc, 1, RI_ALL_ONLINE },
314 { 0x20200, 1, RI_E1E1H_ONLINE },
315 { 0x20204, 1, RI_ALL_ONLINE },
316 { 0x20208, 1, RI_E1E1H_ONLINE },
317 { 0x2020c, 39, RI_ALL_ONLINE },
318 { 0x202c8, 1, RI_E2E3E3B0_ONLINE },
319 { 0x202d8, 4, RI_E2E3E3B0_ONLINE },
320 { 0x202f0, 1, RI_E3B0_ONLINE },
321 { 0x20400, 2, RI_ALL_ONLINE },
322 { 0x2040c, 8, RI_ALL_ONLINE },
323 { 0x2042c, 18, RI_E1HE2E3E3B0_ONLINE },
324 { 0x20480, 1, RI_ALL_ONLINE },
325 { 0x20500, 1, RI_ALL_ONLINE },
326 { 0x20600, 1, RI_ALL_ONLINE },
327 { 0x28000, 1, RI_ALL_ONLINE },
328 { 0x28004, 8191, RI_ALL_OFFLINE },
329 { 0x30000, 1, RI_ALL_ONLINE },
330 { 0x30004, 16383, RI_ALL_OFFLINE },
331 { 0x40000, 98, RI_ALL_ONLINE },
332 { 0x401a8, 8, RI_E1HE2E3E3B0_ONLINE },
333 { 0x401c8, 1, RI_E1H_ONLINE },
334 { 0x401cc, 2, RI_E1HE2E3E3B0_ONLINE },
335 { 0x401d4, 2, RI_E2E3E3B0_ONLINE },
336 { 0x40200, 4, RI_ALL_ONLINE },
337 { 0x40220, 6, RI_E2E3E3B0_ONLINE },
338 { 0x40238, 8, RI_E2E3_ONLINE },
339 { 0x40258, 4, RI_E2E3E3B0_ONLINE },
340 { 0x40268, 2, RI_E3E3B0_ONLINE },
341 { 0x40270, 17, RI_E3B0_ONLINE },
342 { 0x40400, 43, RI_ALL_ONLINE },
343 { 0x404cc, 3, RI_E1HE2E3E3B0_ONLINE },
344 { 0x404e0, 1, RI_E2E3E3B0_ONLINE },
345 { 0x40500, 2, RI_ALL_ONLINE },
346 { 0x40510, 2, RI_ALL_ONLINE },
347 { 0x40520, 2, RI_ALL_ONLINE },
348 { 0x40530, 2, RI_ALL_ONLINE },
349 { 0x40540, 2, RI_ALL_ONLINE },
350 { 0x40550, 10, RI_E2E3E3B0_ONLINE },
351 { 0x40610, 2, RI_E2E3E3B0_ONLINE },
352 { 0x42000, 164, RI_ALL_ONLINE },
353 { 0x422c0, 4, RI_E2E3E3B0_ONLINE },
354 { 0x422d4, 5, RI_E1HE2E3E3B0_ONLINE },
355 { 0x422e8, 1, RI_E2E3E3B0_ONLINE },
356 { 0x42400, 49, RI_ALL_ONLINE },
357 { 0x424c8, 38, RI_ALL_ONLINE },
358 { 0x42568, 2, RI_ALL_ONLINE },
359 { 0x42640, 5, RI_E2E3E3B0_ONLINE },
360 { 0x42800, 1, RI_ALL_ONLINE },
361 { 0x50000, 1, RI_ALL_ONLINE },
362 { 0x50004, 19, RI_ALL_ONLINE },
363 { 0x50050, 8, RI_ALL_ONLINE },
364 { 0x50070, 88, RI_ALL_ONLINE },
365 { 0x501f0, 4, RI_E1HE2E3E3B0_ONLINE },
366 { 0x50200, 2, RI_ALL_ONLINE },
367 { 0x5020c, 7, RI_ALL_ONLINE },
368 { 0x50228, 6, RI_E1HE2E3E3B0_ONLINE },
369 { 0x50240, 1, RI_ALL_ONLINE },
370 { 0x50280, 1, RI_ALL_ONLINE },
371 { 0x50300, 1, RI_E2E3E3B0_ONLINE },
372 { 0x5030c, 1, RI_E2E3E3B0_ONLINE },
373 { 0x50318, 1, RI_E2E3E3B0_ONLINE },
374 { 0x5031c, 1, RI_E2E3E3B0_ONLINE },
375 { 0x50320, 2, RI_E2E3E3B0_ONLINE },
376 { 0x50330, 1, RI_E3B0_ONLINE },
377 { 0x52000, 1, RI_ALL_ONLINE },
378 { 0x54000, 1, RI_ALL_ONLINE },
379 { 0x54004, 3327, RI_ALL_OFFLINE },
380 { 0x58000, 1, RI_ALL_ONLINE },
381 { 0x58004, 8191, RI_E1E1H_OFFLINE },
382 { 0x60000, 26, RI_ALL_ONLINE },
383 { 0x60068, 8, RI_E1E1H_ONLINE },
384 { 0x60088, 12, RI_ALL_ONLINE },
385 { 0x600b8, 9, RI_E1E1H_ONLINE },
386 { 0x600dc, 1, RI_ALL_ONLINE },
387 { 0x600e0, 5, RI_E1E1H_ONLINE },
388 { 0x600f4, 1, RI_E1E1HE2_ONLINE },
389 { 0x600f8, 1, RI_E1E1H_ONLINE },
390 { 0x600fc, 8, RI_ALL_ONLINE },
391 { 0x6013c, 24, RI_E1H_ONLINE },
392 { 0x6019c, 2, RI_E2E3E3B0_ONLINE },
393 { 0x601ac, 18, RI_E2E3E3B0_ONLINE },
394 { 0x60200, 1, RI_ALL_ONLINE },
395 { 0x60204, 2, RI_ALL_OFFLINE },
396 { 0x60210, 13, RI_E2E3E3B0_ONLINE },
397 { 0x60244, 16, RI_E3B0_ONLINE },
398 { 0x61000, 1, RI_ALL_ONLINE },
399 { 0x61004, 511, RI_ALL_OFFLINE },
400 { 0x61800, 512, RI_E3E3B0_OFFLINE },
401 { 0x70000, 8, RI_ALL_ONLINE },
402 { 0x70020, 8184, RI_ALL_OFFLINE },
403 { 0x78000, 8192, RI_E3E3B0_OFFLINE },
404 { 0x85000, 3, RI_ALL_ONLINE },
405 { 0x8501c, 7, RI_ALL_ONLINE },
406 { 0x85048, 1, RI_ALL_ONLINE },
407 { 0x85200, 32, RI_ALL_ONLINE },
408 { 0xb0000, 16384, RI_E1H_ONLINE },
409 { 0xc1000, 7, RI_ALL_ONLINE },
410 { 0xc103c, 2, RI_E2E3E3B0_ONLINE },
411 { 0xc1800, 2, RI_ALL_ONLINE },
412 { 0xc2000, 164, RI_ALL_ONLINE },
413 { 0xc22c0, 5, RI_E2E3E3B0_ONLINE },
414 { 0xc22d8, 4, RI_E2E3E3B0_ONLINE },
415 { 0xc2400, 49, RI_ALL_ONLINE },
416 { 0xc24c8, 38, RI_ALL_ONLINE },
417 { 0xc2568, 2, RI_ALL_ONLINE },
418 { 0xc2600, 1, RI_ALL_ONLINE },
419 { 0xc4000, 165, RI_ALL_ONLINE },
420 { 0xc42d8, 2, RI_E2E3E3B0_ONLINE },
421 { 0xc42e0, 7, RI_E1HE2E3E3B0_ONLINE },
422 { 0xc42fc, 1, RI_E2E3E3B0_ONLINE },
423 { 0xc4400, 51, RI_ALL_ONLINE },
424 { 0xc44d0, 38, RI_ALL_ONLINE },
425 { 0xc4570, 2, RI_ALL_ONLINE },
426 { 0xc4578, 5, RI_E2E3E3B0_ONLINE },
427 { 0xc4600, 1, RI_ALL_ONLINE },
428 { 0xd0000, 19, RI_ALL_ONLINE },
429 { 0xd004c, 8, RI_ALL_ONLINE },
430 { 0xd006c, 91, RI_ALL_ONLINE },
431 { 0xd01fc, 1, RI_E2E3E3B0_ONLINE },
432 { 0xd0200, 2, RI_ALL_ONLINE },
433 { 0xd020c, 7, RI_ALL_ONLINE },
434 { 0xd0228, 18, RI_E1HE2E3E3B0_ONLINE },
435 { 0xd0280, 1, RI_ALL_ONLINE },
436 { 0xd0300, 1, RI_ALL_ONLINE },
437 { 0xd0400, 1, RI_ALL_ONLINE },
438 { 0xd0818, 1, RI_E3B0_ONLINE },
439 { 0xd4000, 1, RI_ALL_ONLINE },
440 { 0xd4004, 2559, RI_ALL_OFFLINE },
441 { 0xd8000, 1, RI_ALL_ONLINE },
442 { 0xd8004, 8191, RI_ALL_OFFLINE },
443 { 0xe0000, 21, RI_ALL_ONLINE },
444 { 0xe0054, 8, RI_ALL_ONLINE },
445 { 0xe0074, 49, RI_ALL_ONLINE },
446 { 0xe0138, 1, RI_E1E1H_ONLINE },
447 { 0xe013c, 35, RI_ALL_ONLINE },
448 { 0xe01f4, 1, RI_E2_ONLINE },
449 { 0xe01f8, 1, RI_E2E3E3B0_ONLINE },
450 { 0xe0200, 2, RI_ALL_ONLINE },
451 { 0xe020c, 8, RI_ALL_ONLINE },
452 { 0xe022c, 18, RI_E1HE2E3E3B0_ONLINE },
453 { 0xe0280, 1, RI_ALL_ONLINE },
454 { 0xe0300, 1, RI_ALL_ONLINE },
455 { 0xe0400, 1, RI_E3B0_ONLINE },
456 { 0xe1000, 1, RI_ALL_ONLINE },
457 { 0xe2000, 1, RI_ALL_ONLINE },
458 { 0xe2004, 2047, RI_ALL_OFFLINE },
459 { 0xf0000, 1, RI_ALL_ONLINE },
460 { 0xf0004, 16383, RI_ALL_OFFLINE },
461 { 0x101000, 12, RI_ALL_ONLINE },
462 { 0x101050, 1, RI_E1HE2E3E3B0_ONLINE },
463 { 0x101054, 3, RI_E2E3E3B0_ONLINE },
464 { 0x101100, 1, RI_ALL_ONLINE },
465 { 0x101800, 8, RI_ALL_ONLINE },
466 { 0x102000, 18, RI_ALL_ONLINE },
467 { 0x102068, 6, RI_E2E3E3B0_ONLINE },
468 { 0x102080, 17, RI_ALL_ONLINE },
469 { 0x1020c8, 8, RI_E1H_ONLINE },
470 { 0x1020e8, 9, RI_E2E3E3B0_ONLINE },
471 { 0x102400, 1, RI_ALL_ONLINE },
472 { 0x103000, 26, RI_ALL_ONLINE },
473 { 0x103098, 5, RI_E1HE2E3E3B0_ONLINE },
474 { 0x1030ac, 2, RI_E2E3E3B0_ONLINE },
475 { 0x1030b4, 1, RI_E2_ONLINE },
476 { 0x1030b8, 7, RI_E2E3E3B0_ONLINE },
477 { 0x1030d8, 8, RI_E2E3E3B0_ONLINE },
478 { 0x103400, 1, RI_E2E3E3B0_ONLINE },
479 { 0x103404, 135, RI_E2E3E3B0_OFFLINE },
480 { 0x103800, 8, RI_ALL_ONLINE },
481 { 0x104000, 63, RI_ALL_ONLINE },
482 { 0x10411c, 16, RI_E2E3E3B0_ONLINE },
483 { 0x104200, 17, RI_ALL_ONLINE },
484 { 0x104400, 64, RI_ALL_ONLINE },
485 { 0x104500, 192, RI_ALL_OFFLINE },
486 { 0x104800, 64, RI_ALL_ONLINE },
487 { 0x104900, 192, RI_ALL_OFFLINE },
488 { 0x105000, 256, RI_ALL_ONLINE },
489 { 0x105400, 768, RI_ALL_OFFLINE },
490 { 0x107000, 7, RI_E2E3E3B0_ONLINE },
491 { 0x10701c, 1, RI_E3E3B0_ONLINE },
492 { 0x108000, 33, RI_E1E1H_ONLINE },
493 { 0x1080ac, 5, RI_E1H_ONLINE },
494 { 0x108100, 5, RI_E1E1H_ONLINE },
495 { 0x108120, 5, RI_E1E1H_ONLINE },
496 { 0x108200, 74, RI_E1E1H_ONLINE },
497 { 0x108400, 74, RI_E1E1H_ONLINE },
498 { 0x108800, 152, RI_E1E1H_ONLINE },
499 { 0x110000, 111, RI_E2E3E3B0_ONLINE },
500 { 0x1101dc, 1, RI_E3E3B0_ONLINE },
501 { 0x110200, 4, RI_E2E3E3B0_ONLINE },
502 { 0x120000, 2, RI_ALL_ONLINE },
503 { 0x120008, 4, RI_ALL_ONLINE },
504 { 0x120018, 3, RI_ALL_ONLINE },
505 { 0x120024, 4, RI_ALL_ONLINE },
506 { 0x120034, 3, RI_ALL_ONLINE },
507 { 0x120040, 4, RI_ALL_ONLINE },
508 { 0x120050, 3, RI_ALL_ONLINE },
509 { 0x12005c, 4, RI_ALL_ONLINE },
510 { 0x12006c, 3, RI_ALL_ONLINE },
511 { 0x120078, 4, RI_ALL_ONLINE },
512 { 0x120088, 3, RI_ALL_ONLINE },
513 { 0x120094, 4, RI_ALL_ONLINE },
514 { 0x1200a4, 3, RI_ALL_ONLINE },
515 { 0x1200b0, 4, RI_ALL_ONLINE },
516 { 0x1200c0, 3, RI_ALL_ONLINE },
517 { 0x1200cc, 4, RI_ALL_ONLINE },
518 { 0x1200dc, 3, RI_ALL_ONLINE },
519 { 0x1200e8, 4, RI_ALL_ONLINE },
520 { 0x1200f8, 3, RI_ALL_ONLINE },
521 { 0x120104, 4, RI_ALL_ONLINE },
522 { 0x120114, 1, RI_ALL_ONLINE },
523 { 0x120118, 22, RI_ALL_ONLINE },
524 { 0x120170, 2, RI_E1E1H_ONLINE },
525 { 0x120178, 243, RI_ALL_ONLINE },
526 { 0x120544, 4, RI_E1E1H_ONLINE },
527 { 0x120554, 6, RI_ALL_ONLINE },
528 { 0x12059c, 6, RI_E1HE2E3E3B0_ONLINE },
529 { 0x1205b4, 1, RI_E1HE2E3E3B0_ONLINE },
530 { 0x1205b8, 15, RI_E1HE2E3E3B0_ONLINE },
531 { 0x1205f4, 1, RI_E1HE2_ONLINE },
532 { 0x1205f8, 4, RI_E2E3E3B0_ONLINE },
533 { 0x120618, 1, RI_E2E3E3B0_ONLINE },
534 { 0x12061c, 20, RI_E1HE2E3E3B0_ONLINE },
535 { 0x12066c, 11, RI_E1HE2E3E3B0_ONLINE },
536 { 0x120698, 3, RI_E2E3E3B0_ONLINE },
537 { 0x1206a4, 1, RI_E2_ONLINE },
538 { 0x1206a8, 1, RI_E2E3E3B0_ONLINE },
539 { 0x1206b0, 75, RI_E2E3E3B0_ONLINE },
540 { 0x1207dc, 1, RI_E2_ONLINE },
541 { 0x1207fc, 1, RI_E2E3E3B0_ONLINE },
542 { 0x12080c, 65, RI_ALL_ONLINE },
543 { 0x120910, 7, RI_E2E3E3B0_ONLINE },
544 { 0x120930, 9, RI_E2E3E3B0_ONLINE },
545 { 0x12095c, 37, RI_E3E3B0_ONLINE },
546 { 0x120a00, 2, RI_E1E1HE2_ONLINE },
547 { 0x120b00, 1, RI_E3E3B0_ONLINE },
548 { 0x122000, 2, RI_ALL_ONLINE },
549 { 0x122008, 2046, RI_E1_OFFLINE },
550 { 0x128000, 2, RI_E1HE2E3E3B0_ONLINE },
551 { 0x128008, 6142, RI_E1HE2E3E3B0_OFFLINE },
552 { 0x130000, 35, RI_E2E3E3B0_ONLINE },
553 { 0x130100, 29, RI_E2E3E3B0_ONLINE },
554 { 0x130180, 1, RI_E2E3E3B0_ONLINE },
555 { 0x130200, 1, RI_E2E3E3B0_ONLINE },
556 { 0x130280, 1, RI_E2E3E3B0_ONLINE },
557 { 0x130300, 5, RI_E2E3E3B0_ONLINE },
558 { 0x130380, 1, RI_E2E3E3B0_ONLINE },
559 { 0x130400, 1, RI_E2E3E3B0_ONLINE },
560 { 0x130480, 5, RI_E2E3E3B0_ONLINE },
561 { 0x130800, 72, RI_E2E3E3B0_ONLINE },
562 { 0x131000, 136, RI_E2E3E3B0_ONLINE },
563 { 0x132000, 148, RI_E2E3E3B0_ONLINE },
564 { 0x134000, 544, RI_E2E3E3B0_ONLINE },
565 { 0x140000, 1, RI_ALL_ONLINE },
566 { 0x140004, 9, RI_E1E1HE2E3_ONLINE },
567 { 0x140028, 8, RI_ALL_ONLINE },
568 { 0x140048, 10, RI_E1E1HE2E3_ONLINE },
569 { 0x140070, 1, RI_ALL_ONLINE },
570 { 0x140074, 10, RI_E1E1HE2E3_ONLINE },
571 { 0x14009c, 1, RI_ALL_ONLINE },
572 { 0x1400a0, 5, RI_E1E1HE2E3_ONLINE },
573 { 0x1400b4, 7, RI_ALL_ONLINE },
574 { 0x1400d0, 10, RI_E1E1HE2E3_ONLINE },
575 { 0x1400f8, 2, RI_ALL_ONLINE },
576 { 0x140100, 5, RI_E1E1H_ONLINE },
577 { 0x140114, 5, RI_E1E1HE2E3_ONLINE },
578 { 0x140128, 7, RI_ALL_ONLINE },
579 { 0x140144, 9, RI_E1E1HE2E3_ONLINE },
580 { 0x140168, 8, RI_ALL_ONLINE },
581 { 0x140188, 3, RI_E1E1HE2E3_ONLINE },
582 { 0x140194, 13, RI_ALL_ONLINE },
583 { 0x140200, 6, RI_E1E1HE2E3_ONLINE },
584 { 0x140220, 4, RI_E2E3_ONLINE },
585 { 0x140240, 4, RI_E2E3_ONLINE },
586 { 0x140260, 4, RI_E2E3_ONLINE },
587 { 0x140280, 4, RI_E2E3_ONLINE },
588 { 0x1402a0, 4, RI_E2E3_ONLINE },
589 { 0x1402c0, 4, RI_E2E3_ONLINE },
590 { 0x1402e0, 2, RI_E2E3_ONLINE },
591 { 0x1402e8, 2, RI_E2E3E3B0_ONLINE },
592 { 0x1402f0, 9, RI_E2E3_ONLINE },
593 { 0x140314, 44, RI_E3B0_ONLINE },
594 { 0x1403d0, 70, RI_E3B0_ONLINE },
595 { 0x144000, 4, RI_E1E1H_ONLINE },
596 { 0x148000, 4, RI_E1E1H_ONLINE },
597 { 0x14c000, 4, RI_E1E1H_ONLINE },
598 { 0x150000, 4, RI_E1E1H_ONLINE },
599 { 0x154000, 4, RI_E1E1H_ONLINE },
600 { 0x158000, 4, RI_E1E1H_ONLINE },
601 { 0x15c000, 2, RI_E1HE2E3E3B0_ONLINE },
602 { 0x15c008, 5, RI_E1H_ONLINE },
603 { 0x15c020, 8, RI_E2E3E3B0_ONLINE },
604 { 0x15c040, 1, RI_E2E3_ONLINE },
605 { 0x15c044, 2, RI_E2E3E3B0_ONLINE },
606 { 0x15c04c, 8, RI_E2E3_ONLINE },
607 { 0x15c06c, 8, RI_E2E3E3B0_ONLINE },
608 { 0x15c090, 13, RI_E2E3E3B0_ONLINE },
609 { 0x15c0c8, 24, RI_E2E3E3B0_ONLINE },
610 { 0x15c128, 2, RI_E2E3_ONLINE },
611 { 0x15c130, 8, RI_E2E3E3B0_ONLINE },
612 { 0x15c150, 2, RI_E3E3B0_ONLINE },
613 { 0x15c158, 2, RI_E3_ONLINE },
614 { 0x15c160, 149, RI_E3B0_ONLINE },
615 { 0x161000, 7, RI_ALL_ONLINE },
616 { 0x16103c, 2, RI_E2E3E3B0_ONLINE },
617 { 0x161800, 2, RI_ALL_ONLINE },
618 { 0x162000, 54, RI_E3E3B0_ONLINE },
619 { 0x162200, 60, RI_E3E3B0_ONLINE },
620 { 0x162400, 54, RI_E3E3B0_ONLINE },
621 { 0x162600, 60, RI_E3E3B0_ONLINE },
622 { 0x162800, 54, RI_E3E3B0_ONLINE },
623 { 0x162a00, 60, RI_E3E3B0_ONLINE },
624 { 0x162c00, 54, RI_E3E3B0_ONLINE },
625 { 0x162e00, 60, RI_E3E3B0_ONLINE },
626 { 0x164000, 60, RI_ALL_ONLINE },
627 { 0x164110, 2, RI_E1HE2E3E3B0_ONLINE },
628 { 0x164118, 15, RI_E2E3E3B0_ONLINE },
629 { 0x164200, 1, RI_ALL_ONLINE },
630 { 0x164208, 1, RI_ALL_ONLINE },
631 { 0x164210, 1, RI_ALL_ONLINE },
632 { 0x164218, 1, RI_ALL_ONLINE },
633 { 0x164220, 1, RI_ALL_ONLINE },
634 { 0x164228, 1, RI_ALL_ONLINE },
635 { 0x164230, 1, RI_ALL_ONLINE },
636 { 0x164238, 1, RI_ALL_ONLINE },
637 { 0x164240, 1, RI_ALL_ONLINE },
638 { 0x164248, 1, RI_ALL_ONLINE },
639 { 0x164250, 1, RI_ALL_ONLINE },
640 { 0x164258, 1, RI_ALL_ONLINE },
641 { 0x164260, 1, RI_ALL_ONLINE },
642 { 0x164270, 2, RI_ALL_ONLINE },
643 { 0x164280, 2, RI_ALL_ONLINE },
644 { 0x164800, 2, RI_ALL_ONLINE },
645 { 0x165000, 2, RI_ALL_ONLINE },
646 { 0x166000, 164, RI_ALL_ONLINE },
647 { 0x1662cc, 7, RI_E2E3E3B0_ONLINE },
648 { 0x166400, 49, RI_ALL_ONLINE },
649 { 0x1664c8, 38, RI_ALL_ONLINE },
650 { 0x166568, 2, RI_ALL_ONLINE },
651 { 0x166570, 5, RI_E2E3E3B0_ONLINE },
652 { 0x166800, 1, RI_ALL_ONLINE },
653 { 0x168000, 137, RI_ALL_ONLINE },
654 { 0x168224, 2, RI_E1E1H_ONLINE },
655 { 0x16822c, 29, RI_ALL_ONLINE },
656 { 0x1682a0, 12, RI_E1E1H_ONLINE },
657 { 0x1682d0, 12, RI_ALL_ONLINE },
658 { 0x168300, 2, RI_E1E1H_ONLINE },
659 { 0x168308, 68, RI_ALL_ONLINE },
660 { 0x168418, 2, RI_E1E1H_ONLINE },
661 { 0x168420, 6, RI_ALL_ONLINE },
662 { 0x168800, 19, RI_ALL_ONLINE },
663 { 0x168900, 1, RI_ALL_ONLINE },
664 { 0x168a00, 128, RI_ALL_ONLINE },
665 { 0x16a000, 1, RI_ALL_ONLINE },
666 { 0x16a004, 1535, RI_ALL_OFFLINE },
667 { 0x16c000, 1, RI_ALL_ONLINE },
668 { 0x16c004, 1535, RI_ALL_OFFLINE },
669 { 0x16e000, 16, RI_E1H_ONLINE },
670 { 0x16e040, 8, RI_E2E3E3B0_ONLINE },
671 { 0x16e100, 1, RI_E1H_ONLINE },
672 { 0x16e200, 2, RI_E1H_ONLINE },
673 { 0x16e400, 161, RI_E1H_ONLINE },
674 { 0x16e684, 2, RI_E1HE2E3E3B0_ONLINE },
675 { 0x16e68c, 12, RI_E1H_ONLINE },
676 { 0x16e6bc, 4, RI_E1HE2E3E3B0_ONLINE },
677 { 0x16e6cc, 4, RI_E1H_ONLINE },
678 { 0x16e6e0, 2, RI_E2E3E3B0_ONLINE },
679 { 0x16e6e8, 5, RI_E2E3_ONLINE },
680 { 0x16e6fc, 5, RI_E2E3E3B0_ONLINE },
681 { 0x16e768, 17, RI_E2E3E3B0_ONLINE },
682 { 0x16e7ac, 12, RI_E3B0_ONLINE },
683 { 0x170000, 24, RI_ALL_ONLINE },
684 { 0x170060, 4, RI_E1E1H_ONLINE },
685 { 0x170070, 65, RI_ALL_ONLINE },
686 { 0x170194, 11, RI_E2E3E3B0_ONLINE },
687 { 0x1701c4, 1, RI_E2E3E3B0_ONLINE },
688 { 0x1701cc, 7, RI_E2E3E3B0_ONLINE },
689 { 0x1701e8, 1, RI_E3E3B0_ONLINE },
690 { 0x1701ec, 1, RI_E2E3E3B0_ONLINE },
691 { 0x1701f4, 1, RI_E2E3E3B0_ONLINE },
692 { 0x170200, 4, RI_ALL_ONLINE },
693 { 0x170214, 1, RI_ALL_ONLINE },
694 { 0x170218, 77, RI_E2E3E3B0_ONLINE },
695 { 0x170400, 64, RI_E2E3E3B0_ONLINE },
696 { 0x178000, 1, RI_ALL_ONLINE },
697 { 0x180000, 61, RI_ALL_ONLINE },
698 { 0x18013c, 2, RI_E1HE2E3E3B0_ONLINE },
699 { 0x180200, 58, RI_ALL_ONLINE },
700 { 0x180340, 4, RI_ALL_ONLINE },
701 { 0x180380, 1, RI_E2E3E3B0_ONLINE },
702 { 0x180388, 1, RI_E2E3E3B0_ONLINE },
703 { 0x180390, 1, RI_E2E3E3B0_ONLINE },
704 { 0x180398, 1, RI_E2E3E3B0_ONLINE },
705 { 0x1803a0, 5, RI_E2E3E3B0_ONLINE },
706 { 0x1803b4, 2, RI_E3E3B0_ONLINE },
707 { 0x180400, 1, RI_ALL_ONLINE },
708 { 0x180404, 255, RI_E1E1H_OFFLINE },
709 { 0x181000, 4, RI_ALL_ONLINE },
710 { 0x181010, 1020, RI_ALL_OFFLINE },
711 { 0x182000, 4, RI_E3E3B0_ONLINE },
712 { 0x1a0000, 1, RI_ALL_ONLINE },
713 { 0x1a0004, 5631, RI_ALL_OFFLINE },
714 { 0x1a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
715 { 0x1a8000, 1, RI_ALL_ONLINE },
716 { 0x1a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
717 { 0x1b0000, 1, RI_ALL_ONLINE },
718 { 0x1b0004, 15, RI_E1H_OFFLINE },
719 { 0x1b0040, 1, RI_E1HE2E3E3B0_ONLINE },
720 { 0x1b0044, 239, RI_E1H_OFFLINE },
721 { 0x1b0400, 1, RI_ALL_ONLINE },
722 { 0x1b0404, 255, RI_E1H_OFFLINE },
723 { 0x1b0800, 1, RI_ALL_ONLINE },
724 { 0x1b0840, 1, RI_E1HE2E3E3B0_ONLINE },
725 { 0x1b0c00, 1, RI_ALL_ONLINE },
726 { 0x1b1000, 1, RI_ALL_ONLINE },
727 { 0x1b1040, 1, RI_E1HE2E3E3B0_ONLINE },
728 { 0x1b1400, 1, RI_ALL_ONLINE },
729 { 0x1b1440, 1, RI_E1HE2E3E3B0_ONLINE },
730 { 0x1b1480, 1, RI_E1HE2E3E3B0_ONLINE },
731 { 0x1b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
732 { 0x1b1800, 128, RI_ALL_OFFLINE },
733 { 0x1b1c00, 128, RI_ALL_OFFLINE },
734 { 0x1b2000, 1, RI_ALL_ONLINE },
735 { 0x1b2400, 1, RI_E1HE2E3E3B0_ONLINE },
736 { 0x1b2404, 5631, RI_E2E3E3B0_OFFLINE },
737 { 0x1b8000, 1, RI_ALL_ONLINE },
738 { 0x1b8040, 1, RI_ALL_ONLINE },
739 { 0x1b8080, 1, RI_ALL_ONLINE },
740 { 0x1b80c0, 1, RI_ALL_ONLINE },
741 { 0x1b8100, 1, RI_ALL_ONLINE },
742 { 0x1b8140, 1, RI_ALL_ONLINE },
743 { 0x1b8180, 1, RI_ALL_ONLINE },
744 { 0x1b81c0, 1, RI_ALL_ONLINE },
745 { 0x1b8200, 1, RI_ALL_ONLINE },
746 { 0x1b8240, 1, RI_ALL_ONLINE },
747 { 0x1b8280, 1, RI_ALL_ONLINE },
748 { 0x1b82c0, 1, RI_ALL_ONLINE },
749 { 0x1b8300, 1, RI_ALL_ONLINE },
750 { 0x1b8340, 1, RI_ALL_ONLINE },
751 { 0x1b8380, 1, RI_ALL_ONLINE },
752 { 0x1b83c0, 1, RI_ALL_ONLINE },
753 { 0x1b8400, 1, RI_ALL_ONLINE },
754 { 0x1b8440, 1, RI_ALL_ONLINE },
755 { 0x1b8480, 1, RI_ALL_ONLINE },
756 { 0x1b84c0, 1, RI_ALL_ONLINE },
757 { 0x1b8500, 1, RI_ALL_ONLINE },
758 { 0x1b8540, 1, RI_ALL_ONLINE },
759 { 0x1b8580, 1, RI_ALL_ONLINE },
760 { 0x1b85c0, 19, RI_E2E3E3B0_ONLINE },
761 { 0x1b8800, 1, RI_ALL_ONLINE },
762 { 0x1b8840, 1, RI_ALL_ONLINE },
763 { 0x1b8880, 1, RI_ALL_ONLINE },
764 { 0x1b88c0, 1, RI_ALL_ONLINE },
765 { 0x1b8900, 1, RI_ALL_ONLINE },
766 { 0x1b8940, 1, RI_ALL_ONLINE },
767 { 0x1b8980, 1, RI_ALL_ONLINE },
768 { 0x1b89c0, 1, RI_ALL_ONLINE },
769 { 0x1b8a00, 1, RI_ALL_ONLINE },
770 { 0x1b8a40, 1, RI_ALL_ONLINE },
771 { 0x1b8a80, 1, RI_ALL_ONLINE },
772 { 0x1b8ac0, 1, RI_ALL_ONLINE },
773 { 0x1b8b00, 1, RI_ALL_ONLINE },
774 { 0x1b8b40, 1, RI_ALL_ONLINE },
775 { 0x1b8b80, 1, RI_ALL_ONLINE },
776 { 0x1b8bc0, 1, RI_ALL_ONLINE },
777 { 0x1b8c00, 1, RI_ALL_ONLINE },
778 { 0x1b8c40, 1, RI_ALL_ONLINE },
779 { 0x1b8c80, 1, RI_ALL_ONLINE },
780 { 0x1b8cc0, 1, RI_ALL_ONLINE },
781 { 0x1b8cc4, 1, RI_E2E3E3B0_ONLINE },
782 { 0x1b8d00, 1, RI_ALL_ONLINE },
783 { 0x1b8d40, 1, RI_ALL_ONLINE },
784 { 0x1b8d80, 1, RI_ALL_ONLINE },
785 { 0x1b8dc0, 1, RI_ALL_ONLINE },
786 { 0x1b8e00, 1, RI_ALL_ONLINE },
787 { 0x1b8e40, 1, RI_ALL_ONLINE },
788 { 0x1b8e80, 1, RI_ALL_ONLINE },
789 { 0x1b8e84, 1, RI_E2E3E3B0_ONLINE },
790 { 0x1b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
791 { 0x1b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
792 { 0x1b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
793 { 0x1b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
794 { 0x1b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
795 { 0x1b8fc4, 2, RI_E2E3E3B0_ONLINE },
796 { 0x1b8fd0, 6, RI_E2E3E3B0_ONLINE },
797 { 0x1b8fe8, 2, RI_E3E3B0_ONLINE },
798 { 0x1b9000, 1, RI_E2E3E3B0_ONLINE },
799 { 0x1b9040, 3, RI_E2E3E3B0_ONLINE },
800 { 0x1b905c, 1, RI_E3E3B0_ONLINE },
801 { 0x1b9064, 1, RI_E3B0_ONLINE },
802 { 0x1b9080, 10, RI_E3B0_ONLINE },
803 { 0x1b9400, 14, RI_E2E3E3B0_ONLINE },
804 { 0x1b943c, 19, RI_E2E3E3B0_ONLINE },
805 { 0x1b9490, 10, RI_E2E3E3B0_ONLINE },
806 { 0x1c0000, 2, RI_ALL_ONLINE },
807 { 0x200000, 65, RI_ALL_ONLINE },
808 { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE },
809 { 0x200200, 58, RI_ALL_ONLINE },
810 { 0x200340, 4, RI_ALL_ONLINE },
811 { 0x200380, 1, RI_E2E3E3B0_ONLINE },
812 { 0x200388, 1, RI_E2E3E3B0_ONLINE },
813 { 0x200390, 1, RI_E2E3E3B0_ONLINE },
814 { 0x200398, 1, RI_E2E3E3B0_ONLINE },
815 { 0x2003a0, 1, RI_E2E3E3B0_ONLINE },
816 { 0x2003a8, 2, RI_E2E3E3B0_ONLINE },
817 { 0x200400, 1, RI_ALL_ONLINE },
818 { 0x200404, 255, RI_E1E1H_OFFLINE },
819 { 0x202000, 4, RI_ALL_ONLINE },
820 { 0x202010, 2044, RI_ALL_OFFLINE },
821 { 0x204000, 4, RI_E3E3B0_ONLINE },
822 { 0x220000, 1, RI_ALL_ONLINE },
823 { 0x220004, 5631, RI_ALL_OFFLINE },
824 { 0x225800, 2560, RI_E1HE2E3E3B0_OFFLINE },
825 { 0x228000, 1, RI_ALL_ONLINE },
826 { 0x228004, 8191, RI_E1HE2E3E3B0_OFFLINE },
827 { 0x230000, 1, RI_ALL_ONLINE },
828 { 0x230004, 15, RI_E1H_OFFLINE },
829 { 0x230040, 1, RI_E1HE2E3E3B0_ONLINE },
830 { 0x230044, 239, RI_E1H_OFFLINE },
831 { 0x230400, 1, RI_ALL_ONLINE },
832 { 0x230404, 255, RI_E1H_OFFLINE },
833 { 0x230800, 1, RI_ALL_ONLINE },
834 { 0x230840, 1, RI_E1HE2E3E3B0_ONLINE },
835 { 0x230c00, 1, RI_ALL_ONLINE },
836 { 0x231000, 1, RI_ALL_ONLINE },
837 { 0x231040, 1, RI_E1HE2E3E3B0_ONLINE },
838 { 0x231400, 1, RI_ALL_ONLINE },
839 { 0x231440, 1, RI_E1HE2E3E3B0_ONLINE },
840 { 0x231480, 1, RI_E1HE2E3E3B0_ONLINE },
841 { 0x2314c0, 1, RI_E1HE2E3E3B0_ONLINE },
842 { 0x231800, 128, RI_ALL_OFFLINE },
843 { 0x231c00, 128, RI_ALL_OFFLINE },
844 { 0x232000, 1, RI_ALL_ONLINE },
845 { 0x232400, 1, RI_E1HE2E3E3B0_ONLINE },
846 { 0x232404, 5631, RI_E2E3E3B0_OFFLINE },
847 { 0x238000, 1, RI_ALL_ONLINE },
848 { 0x238040, 1, RI_ALL_ONLINE },
849 { 0x238080, 1, RI_ALL_ONLINE },
850 { 0x2380c0, 1, RI_ALL_ONLINE },
851 { 0x238100, 1, RI_ALL_ONLINE },
852 { 0x238140, 1, RI_ALL_ONLINE },
853 { 0x238180, 1, RI_ALL_ONLINE },
854 { 0x2381c0, 1, RI_ALL_ONLINE },
855 { 0x238200, 1, RI_ALL_ONLINE },
856 { 0x238240, 1, RI_ALL_ONLINE },
857 { 0x238280, 1, RI_ALL_ONLINE },
858 { 0x2382c0, 1, RI_ALL_ONLINE },
859 { 0x238300, 1, RI_ALL_ONLINE },
860 { 0x238340, 1, RI_ALL_ONLINE },
861 { 0x238380, 1, RI_ALL_ONLINE },
862 { 0x2383c0, 1, RI_ALL_ONLINE },
863 { 0x238400, 1, RI_ALL_ONLINE },
864 { 0x238440, 1, RI_ALL_ONLINE },
865 { 0x238480, 1, RI_ALL_ONLINE },
866 { 0x2384c0, 1, RI_ALL_ONLINE },
867 { 0x238500, 1, RI_ALL_ONLINE },
868 { 0x238540, 1, RI_ALL_ONLINE },
869 { 0x238580, 1, RI_ALL_ONLINE },
870 { 0x2385c0, 19, RI_E2E3E3B0_ONLINE },
871 { 0x238800, 1, RI_ALL_ONLINE },
872 { 0x238840, 1, RI_ALL_ONLINE },
873 { 0x238880, 1, RI_ALL_ONLINE },
874 { 0x2388c0, 1, RI_ALL_ONLINE },
875 { 0x238900, 1, RI_ALL_ONLINE },
876 { 0x238940, 1, RI_ALL_ONLINE },
877 { 0x238980, 1, RI_ALL_ONLINE },
878 { 0x2389c0, 1, RI_ALL_ONLINE },
879 { 0x238a00, 1, RI_ALL_ONLINE },
880 { 0x238a40, 1, RI_ALL_ONLINE },
881 { 0x238a80, 1, RI_ALL_ONLINE },
882 { 0x238ac0, 1, RI_ALL_ONLINE },
883 { 0x238b00, 1, RI_ALL_ONLINE },
884 { 0x238b40, 1, RI_ALL_ONLINE },
885 { 0x238b80, 1, RI_ALL_ONLINE },
886 { 0x238bc0, 1, RI_ALL_ONLINE },
887 { 0x238c00, 1, RI_ALL_ONLINE },
888 { 0x238c40, 1, RI_ALL_ONLINE },
889 { 0x238c80, 1, RI_ALL_ONLINE },
890 { 0x238cc0, 1, RI_ALL_ONLINE },
891 { 0x238cc4, 1, RI_E2E3E3B0_ONLINE },
892 { 0x238d00, 1, RI_ALL_ONLINE },
893 { 0x238d40, 1, RI_ALL_ONLINE },
894 { 0x238d80, 1, RI_ALL_ONLINE },
895 { 0x238dc0, 1, RI_ALL_ONLINE },
896 { 0x238e00, 1, RI_ALL_ONLINE },
897 { 0x238e40, 1, RI_ALL_ONLINE },
898 { 0x238e80, 1, RI_ALL_ONLINE },
899 { 0x238e84, 1, RI_E2E3E3B0_ONLINE },
900 { 0x238ec0, 1, RI_E1HE2E3E3B0_ONLINE },
901 { 0x238f00, 1, RI_E1HE2E3E3B0_ONLINE },
902 { 0x238f40, 1, RI_E1HE2E3E3B0_ONLINE },
903 { 0x238f80, 1, RI_E1HE2E3E3B0_ONLINE },
904 { 0x238fc0, 1, RI_E1HE2E3E3B0_ONLINE },
905 { 0x238fc4, 2, RI_E2E3E3B0_ONLINE },
906 { 0x238fd0, 6, RI_E2E3E3B0_ONLINE },
907 { 0x238fe8, 2, RI_E3E3B0_ONLINE },
908 { 0x239000, 1, RI_E2E3E3B0_ONLINE },
909 { 0x239040, 3, RI_E2E3E3B0_ONLINE },
910 { 0x23905c, 1, RI_E3E3B0_ONLINE },
911 { 0x239064, 1, RI_E3B0_ONLINE },
912 { 0x239080, 10, RI_E3B0_ONLINE },
913 { 0x240000, 2, RI_ALL_ONLINE },
914 { 0x280000, 65, RI_ALL_ONLINE },
915 { 0x28014c, 2, RI_E1HE2E3E3B0_ONLINE },
916 { 0x280200, 58, RI_ALL_ONLINE },
917 { 0x280340, 4, RI_ALL_ONLINE },
918 { 0x280380, 1, RI_E2E3E3B0_ONLINE },
919 { 0x280388, 1, RI_E2E3E3B0_ONLINE },
920 { 0x280390, 1, RI_E2E3E3B0_ONLINE },
921 { 0x280398, 1, RI_E2E3E3B0_ONLINE },
922 { 0x2803a0, 1, RI_E2E3E3B0_ONLINE },
923 { 0x2803a8, 2, RI_E2E3E3B0_ONLINE },
924 { 0x280400, 1, RI_ALL_ONLINE },
925 { 0x280404, 255, RI_E1E1H_OFFLINE },
926 { 0x282000, 4, RI_ALL_ONLINE },
927 { 0x282010, 2044, RI_ALL_OFFLINE },
928 { 0x284000, 4, RI_E3E3B0_ONLINE },
929 { 0x2a0000, 1, RI_ALL_ONLINE },
930 { 0x2a0004, 5631, RI_ALL_OFFLINE },
931 { 0x2a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
932 { 0x2a8000, 1, RI_ALL_ONLINE },
933 { 0x2a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
934 { 0x2b0000, 1, RI_ALL_ONLINE },
935 { 0x2b0004, 15, RI_E1H_OFFLINE },
936 { 0x2b0040, 1, RI_E1HE2E3E3B0_ONLINE },
937 { 0x2b0044, 239, RI_E1H_OFFLINE },
938 { 0x2b0400, 1, RI_ALL_ONLINE },
939 { 0x2b0404, 255, RI_E1H_OFFLINE },
940 { 0x2b0800, 1, RI_ALL_ONLINE },
941 { 0x2b0840, 1, RI_E1HE2E3E3B0_ONLINE },
942 { 0x2b0c00, 1, RI_ALL_ONLINE },
943 { 0x2b1000, 1, RI_ALL_ONLINE },
944 { 0x2b1040, 1, RI_E1HE2E3E3B0_ONLINE },
945 { 0x2b1400, 1, RI_ALL_ONLINE },
946 { 0x2b1440, 1, RI_E1HE2E3E3B0_ONLINE },
947 { 0x2b1480, 1, RI_E1HE2E3E3B0_ONLINE },
948 { 0x2b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
949 { 0x2b1800, 128, RI_ALL_OFFLINE },
950 { 0x2b1c00, 128, RI_ALL_OFFLINE },
951 { 0x2b2000, 1, RI_ALL_ONLINE },
952 { 0x2b2400, 1, RI_E1HE2E3E3B0_ONLINE },
953 { 0x2b2404, 5631, RI_E2E3E3B0_OFFLINE },
954 { 0x2b8000, 1, RI_ALL_ONLINE },
955 { 0x2b8040, 1, RI_ALL_ONLINE },
956 { 0x2b8080, 1, RI_ALL_ONLINE },
957 { 0x2b80c0, 1, RI_ALL_ONLINE },
958 { 0x2b8100, 1, RI_ALL_ONLINE },
959 { 0x2b8140, 1, RI_ALL_ONLINE },
960 { 0x2b8180, 1, RI_ALL_ONLINE },
961 { 0x2b81c0, 1, RI_ALL_ONLINE },
962 { 0x2b8200, 1, RI_ALL_ONLINE },
963 { 0x2b8240, 1, RI_ALL_ONLINE },
964 { 0x2b8280, 1, RI_ALL_ONLINE },
965 { 0x2b82c0, 1, RI_ALL_ONLINE },
966 { 0x2b8300, 1, RI_ALL_ONLINE },
967 { 0x2b8340, 1, RI_ALL_ONLINE },
968 { 0x2b8380, 1, RI_ALL_ONLINE },
969 { 0x2b83c0, 1, RI_ALL_ONLINE },
970 { 0x2b8400, 1, RI_ALL_ONLINE },
971 { 0x2b8440, 1, RI_ALL_ONLINE },
972 { 0x2b8480, 1, RI_ALL_ONLINE },
973 { 0x2b84c0, 1, RI_ALL_ONLINE },
974 { 0x2b8500, 1, RI_ALL_ONLINE },
975 { 0x2b8540, 1, RI_ALL_ONLINE },
976 { 0x2b8580, 1, RI_ALL_ONLINE },
977 { 0x2b85c0, 19, RI_E2E3E3B0_ONLINE },
978 { 0x2b8800, 1, RI_ALL_ONLINE },
979 { 0x2b8840, 1, RI_ALL_ONLINE },
980 { 0x2b8880, 1, RI_ALL_ONLINE },
981 { 0x2b88c0, 1, RI_ALL_ONLINE },
982 { 0x2b8900, 1, RI_ALL_ONLINE },
983 { 0x2b8940, 1, RI_ALL_ONLINE },
984 { 0x2b8980, 1, RI_ALL_ONLINE },
985 { 0x2b89c0, 1, RI_ALL_ONLINE },
986 { 0x2b8a00, 1, RI_ALL_ONLINE },
987 { 0x2b8a40, 1, RI_ALL_ONLINE },
988 { 0x2b8a80, 1, RI_ALL_ONLINE },
989 { 0x2b8ac0, 1, RI_ALL_ONLINE },
990 { 0x2b8b00, 1, RI_ALL_ONLINE },
991 { 0x2b8b40, 1, RI_ALL_ONLINE },
992 { 0x2b8b80, 1, RI_ALL_ONLINE },
993 { 0x2b8bc0, 1, RI_ALL_ONLINE },
994 { 0x2b8c00, 1, RI_ALL_ONLINE },
995 { 0x2b8c40, 1, RI_ALL_ONLINE },
996 { 0x2b8c80, 1, RI_ALL_ONLINE },
997 { 0x2b8cc0, 1, RI_ALL_ONLINE },
998 { 0x2b8cc4, 1, RI_E2E3E3B0_ONLINE },
999 { 0x2b8d00, 1, RI_ALL_ONLINE },
1000 { 0x2b8d40, 1, RI_ALL_ONLINE },
1001 { 0x2b8d80, 1, RI_ALL_ONLINE },
1002 { 0x2b8dc0, 1, RI_ALL_ONLINE },
1003 { 0x2b8e00, 1, RI_ALL_ONLINE },
1004 { 0x2b8e40, 1, RI_ALL_ONLINE },
1005 { 0x2b8e80, 1, RI_ALL_ONLINE },
1006 { 0x2b8e84, 1, RI_E2E3E3B0_ONLINE },
1007 { 0x2b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
1008 { 0x2b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
1009 { 0x2b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
1010 { 0x2b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
1011 { 0x2b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
1012 { 0x2b8fc4, 2, RI_E2E3E3B0_ONLINE },
1013 { 0x2b8fd0, 6, RI_E2E3E3B0_ONLINE },
1014 { 0x2b8fe8, 2, RI_E3E3B0_ONLINE },
1015 { 0x2b9000, 1, RI_E2E3E3B0_ONLINE },
1016 { 0x2b9040, 3, RI_E2E3E3B0_ONLINE },
1017 { 0x2b905c, 1, RI_E3E3B0_ONLINE },
1018 { 0x2b9064, 1, RI_E3B0_ONLINE },
1019 { 0x2b9080, 10, RI_E3B0_ONLINE },
1020 { 0x2b9400, 14, RI_E2E3E3B0_ONLINE },
1021 { 0x2b943c, 19, RI_E2E3E3B0_ONLINE },
1022 { 0x2b9490, 10, RI_E2E3E3B0_ONLINE },
1023 { 0x2c0000, 2, RI_ALL_ONLINE },
1024 { 0x300000, 65, RI_ALL_ONLINE },
1025 { 0x30014c, 2, RI_E1HE2E3E3B0_ONLINE },
1026 { 0x300200, 58, RI_ALL_ONLINE },
1027 { 0x300340, 4, RI_ALL_ONLINE },
1028 { 0x300380, 1, RI_E2E3E3B0_ONLINE },
1029 { 0x300388, 1, RI_E2E3E3B0_ONLINE },
1030 { 0x300390, 1, RI_E2E3E3B0_ONLINE },
1031 { 0x300398, 1, RI_E2E3E3B0_ONLINE },
1032 { 0x3003a0, 1, RI_E2E3E3B0_ONLINE },
1033 { 0x3003a8, 2, RI_E2E3E3B0_ONLINE },
1034 { 0x300400, 1, RI_ALL_ONLINE },
1035 { 0x300404, 255, RI_E1E1H_OFFLINE },
1036 { 0x302000, 4, RI_ALL_ONLINE },
1037 { 0x302010, 2044, RI_ALL_OFFLINE },
1038 { 0x304000, 4, RI_E3E3B0_ONLINE },
1039 { 0x320000, 1, RI_ALL_ONLINE },
1040 { 0x320004, 5631, RI_ALL_OFFLINE },
1041 { 0x325800, 2560, RI_E1HE2E3E3B0_OFFLINE },
1042 { 0x328000, 1, RI_ALL_ONLINE },
1043 { 0x328004, 8191, RI_E1HE2E3E3B0_OFFLINE },
1044 { 0x330000, 1, RI_ALL_ONLINE },
1045 { 0x330004, 15, RI_E1H_OFFLINE },
1046 { 0x330040, 1, RI_E1HE2E3E3B0_ONLINE },
1047 { 0x330044, 239, RI_E1H_OFFLINE },
1048 { 0x330400, 1, RI_ALL_ONLINE },
1049 { 0x330404, 255, RI_E1H_OFFLINE },
1050 { 0x330800, 1, RI_ALL_ONLINE },
1051 { 0x330840, 1, RI_E1HE2E3E3B0_ONLINE },
1052 { 0x330c00, 1, RI_ALL_ONLINE },
1053 { 0x331000, 1, RI_ALL_ONLINE },
1054 { 0x331040, 1, RI_E1HE2E3E3B0_ONLINE },
1055 { 0x331400, 1, RI_ALL_ONLINE },
1056 { 0x331440, 1, RI_E1HE2E3E3B0_ONLINE },
1057 { 0x331480, 1, RI_E1HE2E3E3B0_ONLINE },
1058 { 0x3314c0, 1, RI_E1HE2E3E3B0_ONLINE },
1059 { 0x331800, 128, RI_ALL_OFFLINE },
1060 { 0x331c00, 128, RI_ALL_OFFLINE },
1061 { 0x332000, 1, RI_ALL_ONLINE },
1062 { 0x332400, 1, RI_E1HE2E3E3B0_ONLINE },
1063 { 0x332404, 5631, RI_E2E3E3B0_OFFLINE },
1064 { 0x338000, 1, RI_ALL_ONLINE },
1065 { 0x338040, 1, RI_ALL_ONLINE },
1066 { 0x338080, 1, RI_ALL_ONLINE },
1067 { 0x3380c0, 1, RI_ALL_ONLINE },
1068 { 0x338100, 1, RI_ALL_ONLINE },
1069 { 0x338140, 1, RI_ALL_ONLINE },
1070 { 0x338180, 1, RI_ALL_ONLINE },
1071 { 0x3381c0, 1, RI_ALL_ONLINE },
1072 { 0x338200, 1, RI_ALL_ONLINE },
1073 { 0x338240, 1, RI_ALL_ONLINE },
1074 { 0x338280, 1, RI_ALL_ONLINE },
1075 { 0x3382c0, 1, RI_ALL_ONLINE },
1076 { 0x338300, 1, RI_ALL_ONLINE },
1077 { 0x338340, 1, RI_ALL_ONLINE },
1078 { 0x338380, 1, RI_ALL_ONLINE },
1079 { 0x3383c0, 1, RI_ALL_ONLINE },
1080 { 0x338400, 1, RI_ALL_ONLINE },
1081 { 0x338440, 1, RI_ALL_ONLINE },
1082 { 0x338480, 1, RI_ALL_ONLINE },
1083 { 0x3384c0, 1, RI_ALL_ONLINE },
1084 { 0x338500, 1, RI_ALL_ONLINE },
1085 { 0x338540, 1, RI_ALL_ONLINE },
1086 { 0x338580, 1, RI_ALL_ONLINE },
1087 { 0x3385c0, 19, RI_E2E3E3B0_ONLINE },
1088 { 0x338800, 1, RI_ALL_ONLINE },
1089 { 0x338840, 1, RI_ALL_ONLINE },
1090 { 0x338880, 1, RI_ALL_ONLINE },
1091 { 0x3388c0, 1, RI_ALL_ONLINE },
1092 { 0x338900, 1, RI_ALL_ONLINE },
1093 { 0x338940, 1, RI_ALL_ONLINE },
1094 { 0x338980, 1, RI_ALL_ONLINE },
1095 { 0x3389c0, 1, RI_ALL_ONLINE },
1096 { 0x338a00, 1, RI_ALL_ONLINE },
1097 { 0x338a40, 1, RI_ALL_ONLINE },
1098 { 0x338a80, 1, RI_ALL_ONLINE },
1099 { 0x338ac0, 1, RI_ALL_ONLINE },
1100 { 0x338b00, 1, RI_ALL_ONLINE },
1101 { 0x338b40, 1, RI_ALL_ONLINE },
1102 { 0x338b80, 1, RI_ALL_ONLINE },
1103 { 0x338bc0, 1, RI_ALL_ONLINE },
1104 { 0x338c00, 1, RI_ALL_ONLINE },
1105 { 0x338c40, 1, RI_ALL_ONLINE },
1106 { 0x338c80, 1, RI_ALL_ONLINE },
1107 { 0x338cc0, 1, RI_ALL_ONLINE },
1108 { 0x338cc4, 1, RI_E2E3E3B0_ONLINE },
1109 { 0x338d00, 1, RI_ALL_ONLINE },
1110 { 0x338d40, 1, RI_ALL_ONLINE },
1111 { 0x338d80, 1, RI_ALL_ONLINE },
1112 { 0x338dc0, 1, RI_ALL_ONLINE },
1113 { 0x338e00, 1, RI_ALL_ONLINE },
1114 { 0x338e40, 1, RI_ALL_ONLINE },
1115 { 0x338e80, 1, RI_ALL_ONLINE },
1116 { 0x338e84, 1, RI_E2E3E3B0_ONLINE },
1117 { 0x338ec0, 1, RI_E1HE2E3E3B0_ONLINE },
1118 { 0x338f00, 1, RI_E1HE2E3E3B0_ONLINE },
1119 { 0x338f40, 1, RI_E1HE2E3E3B0_ONLINE },
1120 { 0x338f80, 1, RI_E1HE2E3E3B0_ONLINE },
1121 { 0x338fc0, 1, RI_E1HE2E3E3B0_ONLINE },
1122 { 0x338fc4, 2, RI_E2E3E3B0_ONLINE },
1123 { 0x338fd0, 6, RI_E2E3E3B0_ONLINE },
1124 { 0x338fe8, 2, RI_E3E3B0_ONLINE },
1125 { 0x339000, 1, RI_E2E3E3B0_ONLINE },
1126 { 0x339040, 3, RI_E2E3E3B0_ONLINE },
1127 { 0x33905c, 1, RI_E3E3B0_ONLINE },
1128 { 0x339064, 1, RI_E3B0_ONLINE },
1129 { 0x339080, 10, RI_E3B0_ONLINE },
1130 { 0x340000, 2, RI_ALL_ONLINE },
1131};
1132#define REGS_COUNT ARRAY_SIZE(reg_addrs)
1133
1134static const struct dump_sign dump_sign_all = { 0x4e23fde1, 0x70017, 0x3a };
1135
1136static const u32 page_vals_e2[] = { 0, 128 };
1137#define PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2)
1138
1139static const u32 page_write_regs_e2[] = { 328476 };
1140#define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2)
1141
1142static const struct reg_addr page_read_regs_e2[] = {
1143 { 0x58000, 4608, RI_E2_ONLINE } };
1144#define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2)
1145
1146static const u32 page_vals_e3[] = { 0, 128 };
1147#define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3)
1148
1149static const u32 page_write_regs_e3[] = { 328476 };
1150#define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3)
1151
1152static const struct reg_addr page_read_regs_e3[] = {
1153 { 0x58000, 4608, RI_E3E3B0_ONLINE } };
1154#define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3)
1155
1156#endif /* BNX2X_DUMP_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
new file mode 100644
index 00000000000..cf3e47914dd
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -0,0 +1,2389 @@
1/* bnx2x_ethtool.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17#include <linux/ethtool.h>
18#include <linux/netdevice.h>
19#include <linux/types.h>
20#include <linux/sched.h>
21#include <linux/crc32.h>
22
23
24#include "bnx2x.h"
25#include "bnx2x_cmn.h"
26#include "bnx2x_dump.h"
27#include "bnx2x_init.h"
28#include "bnx2x_sp.h"
29
30/* Note: in the format strings below %s is replaced by the queue-name which is
31 * either its index or 'fcoe' for the fcoe queue. Make sure the format string
32 * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
33 */
34#define MAX_QUEUE_NAME_LEN 4
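/* Illustrative note (not part of the driver source): for queue index 2 the
 * format "[%s]: rx_bytes" expands to "[2]: rx_bytes". With ETH_GSTRING_LEN
 * of 32 and MAX_QUEUE_NAME_LEN of 4, a format may be at most 30 characters,
 * e.g. roughly:
 *
 *	snprintf(buf, ETH_GSTRING_LEN, bnx2x_q_stats_arr[0].string, "2");
 */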
35static const struct {
36 long offset;
37 int size;
38 char string[ETH_GSTRING_LEN];
39} bnx2x_q_stats_arr[] = {
40/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
41 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
42 8, "[%s]: rx_ucast_packets" },
43 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
44 8, "[%s]: rx_mcast_packets" },
45 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
46 8, "[%s]: rx_bcast_packets" },
47 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
48 { Q_STATS_OFFSET32(rx_err_discard_pkt),
49 4, "[%s]: rx_phy_ip_err_discards"},
50 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
51 4, "[%s]: rx_skb_alloc_discard" },
52 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
53
54 { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
55/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
56 8, "[%s]: tx_ucast_packets" },
57 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
58 8, "[%s]: tx_mcast_packets" },
59 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
60 8, "[%s]: tx_bcast_packets" },
61 { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
62 8, "[%s]: tpa_aggregations" },
63 { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
64 8, "[%s]: tpa_aggregated_frames"},
65 { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"}
66};
67
68#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
69
70static const struct {
71 long offset;
72 int size;
73 u32 flags;
74#define STATS_FLAGS_PORT 1
75#define STATS_FLAGS_FUNC 2
76#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
77 char string[ETH_GSTRING_LEN];
78} bnx2x_stats_arr[] = {
79/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
80 8, STATS_FLAGS_BOTH, "rx_bytes" },
81 { STATS_OFFSET32(error_bytes_received_hi),
82 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
83 { STATS_OFFSET32(total_unicast_packets_received_hi),
84 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
85 { STATS_OFFSET32(total_multicast_packets_received_hi),
86 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
87 { STATS_OFFSET32(total_broadcast_packets_received_hi),
88 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
89 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
90 8, STATS_FLAGS_PORT, "rx_crc_errors" },
91 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
92 8, STATS_FLAGS_PORT, "rx_align_errors" },
93 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
94 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
95 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
96 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
97/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
98 8, STATS_FLAGS_PORT, "rx_fragments" },
99 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
100 8, STATS_FLAGS_PORT, "rx_jabbers" },
101 { STATS_OFFSET32(no_buff_discard_hi),
102 8, STATS_FLAGS_BOTH, "rx_discards" },
103 { STATS_OFFSET32(mac_filter_discard),
104 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
105 { STATS_OFFSET32(mf_tag_discard),
106 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
107 { STATS_OFFSET32(brb_drop_hi),
108 8, STATS_FLAGS_PORT, "rx_brb_discard" },
109 { STATS_OFFSET32(brb_truncate_hi),
110 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
111 { STATS_OFFSET32(pause_frames_received_hi),
112 8, STATS_FLAGS_PORT, "rx_pause_frames" },
113 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
114 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
115 { STATS_OFFSET32(nig_timer_max),
116 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
117/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
118 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
119 { STATS_OFFSET32(rx_skb_alloc_failed),
120 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
121 { STATS_OFFSET32(hw_csum_err),
122 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
123
124 { STATS_OFFSET32(total_bytes_transmitted_hi),
125 8, STATS_FLAGS_BOTH, "tx_bytes" },
126 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
127 8, STATS_FLAGS_PORT, "tx_error_bytes" },
128 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
129 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
130 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
131 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
132 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
133 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
134 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
135 8, STATS_FLAGS_PORT, "tx_mac_errors" },
136 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
137 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
138/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
139 8, STATS_FLAGS_PORT, "tx_single_collisions" },
140 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
141 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
142 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
143 8, STATS_FLAGS_PORT, "tx_deferred" },
144 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
145 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
146 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
147 8, STATS_FLAGS_PORT, "tx_late_collisions" },
148 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
149 8, STATS_FLAGS_PORT, "tx_total_collisions" },
150 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
151 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
152 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
153 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
154 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
155 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
156 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
157 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
158/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
159 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
160 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
161 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
162 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
163 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
164 { STATS_OFFSET32(pause_frames_sent_hi),
165 8, STATS_FLAGS_PORT, "tx_pause_frames" },
166 { STATS_OFFSET32(total_tpa_aggregations_hi),
167 8, STATS_FLAGS_FUNC, "tpa_aggregations" },
168 { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
169 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
170 { STATS_OFFSET32(total_tpa_bytes_hi),
171 8, STATS_FLAGS_FUNC, "tpa_bytes"}
172};
173
174#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
175static int bnx2x_get_port_type(struct bnx2x *bp)
176{
177 int port_type;
178 u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
179 switch (bp->link_params.phy[phy_idx].media_type) {
180 case ETH_PHY_SFP_FIBER:
181 case ETH_PHY_XFP_FIBER:
182 case ETH_PHY_KR:
183 case ETH_PHY_CX4:
184 port_type = PORT_FIBRE;
185 break;
186 case ETH_PHY_DA_TWINAX:
187 port_type = PORT_DA;
188 break;
189 case ETH_PHY_BASE_T:
190 port_type = PORT_TP;
191 break;
192 case ETH_PHY_NOT_PRESENT:
193 port_type = PORT_NONE;
194 break;
195 case ETH_PHY_UNSPECIFIED:
196 default:
197 port_type = PORT_OTHER;
198 break;
199 }
200 return port_type;
201}
202
203static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
204{
205 struct bnx2x *bp = netdev_priv(dev);
206 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
207
208 /* Dual Media boards present all available port types */
209 cmd->supported = bp->port.supported[cfg_idx] |
210 (bp->port.supported[cfg_idx ^ 1] &
211 (SUPPORTED_TP | SUPPORTED_FIBRE));
212 cmd->advertising = bp->port.advertising[cfg_idx];
213
214 if ((bp->state == BNX2X_STATE_OPEN) &&
215 !(bp->flags & MF_FUNC_DIS) &&
216 (bp->link_vars.link_up)) {
217 ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
218 cmd->duplex = bp->link_vars.duplex;
219 } else {
220 ethtool_cmd_speed_set(
221 cmd, bp->link_params.req_line_speed[cfg_idx]);
222 cmd->duplex = bp->link_params.req_duplex[cfg_idx];
223 }
224
225 if (IS_MF(bp))
226 ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
227
228 cmd->port = bnx2x_get_port_type(bp);
229
230 cmd->phy_address = bp->mdio.prtad;
231 cmd->transceiver = XCVR_INTERNAL;
232
233 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
234 cmd->autoneg = AUTONEG_ENABLE;
235 else
236 cmd->autoneg = AUTONEG_DISABLE;
237
238 cmd->maxtxpkt = 0;
239 cmd->maxrxpkt = 0;
240
241 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
242 DP_LEVEL " supported 0x%x advertising 0x%x speed %u\n"
243 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
244 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
245 cmd->cmd, cmd->supported, cmd->advertising,
246 ethtool_cmd_speed(cmd),
247 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
248 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
249
250 return 0;
251}
252
253static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
254{
255 struct bnx2x *bp = netdev_priv(dev);
256 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
257 u32 speed;
258
259 if (IS_MF_SD(bp))
260 return 0;
261
262 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
263 " supported 0x%x advertising 0x%x speed %u\n"
264 " duplex %d port %d phy_address %d transceiver %d\n"
265 " autoneg %d maxtxpkt %d maxrxpkt %d\n",
266 cmd->cmd, cmd->supported, cmd->advertising,
267 ethtool_cmd_speed(cmd),
268 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
269 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
270
271 speed = ethtool_cmd_speed(cmd);
272
273 if (IS_MF_SI(bp)) {
274 u32 part;
275 u32 line_speed = bp->link_vars.line_speed;
276
277 /* use 10G if no link detected */
278 if (!line_speed)
279 line_speed = 10000;
280
281 if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
282 			BNX2X_DEV_INFO("Setting the speed requires BC %X or "
283 				       "higher, please upgrade BC\n",
284 REQ_BC_VER_4_SET_MF_BW);
285 return -EINVAL;
286 }
287
288 part = (speed * 100) / line_speed;
289
290 if (line_speed < speed || !part) {
291 BNX2X_DEV_INFO("Speed setting should be in a range "
292 "from 1%% to 100%% "
293 "of actual line speed\n");
294 return -EINVAL;
295 }
296
297 if (bp->state != BNX2X_STATE_OPEN)
298 /* store value for following "load" */
299 bp->pending_max = part;
300 else
301 bnx2x_update_max_mf_config(bp, part);
302
303 return 0;
304 }
305
306 cfg_idx = bnx2x_get_link_cfg_idx(bp);
307 old_multi_phy_config = bp->link_params.multi_phy_config;
308 switch (cmd->port) {
309 case PORT_TP:
310 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
311 break; /* no port change */
312
313 if (!(bp->port.supported[0] & SUPPORTED_TP ||
314 bp->port.supported[1] & SUPPORTED_TP)) {
315 DP(NETIF_MSG_LINK, "Unsupported port type\n");
316 return -EINVAL;
317 }
318 bp->link_params.multi_phy_config &=
319 ~PORT_HW_CFG_PHY_SELECTION_MASK;
320 if (bp->link_params.multi_phy_config &
321 PORT_HW_CFG_PHY_SWAPPED_ENABLED)
322 bp->link_params.multi_phy_config |=
323 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
324 else
325 bp->link_params.multi_phy_config |=
326 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
327 break;
328 case PORT_FIBRE:
329 if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
330 break; /* no port change */
331
332 if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
333 bp->port.supported[1] & SUPPORTED_FIBRE)) {
334 DP(NETIF_MSG_LINK, "Unsupported port type\n");
335 return -EINVAL;
336 }
337 bp->link_params.multi_phy_config &=
338 ~PORT_HW_CFG_PHY_SELECTION_MASK;
339 if (bp->link_params.multi_phy_config &
340 PORT_HW_CFG_PHY_SWAPPED_ENABLED)
341 bp->link_params.multi_phy_config |=
342 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
343 else
344 bp->link_params.multi_phy_config |=
345 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
346 break;
347 default:
348 DP(NETIF_MSG_LINK, "Unsupported port type\n");
349 return -EINVAL;
350 }
351 	/* Save new config in case the command completes successfully */
352 new_multi_phy_config = bp->link_params.multi_phy_config;
353 /* Get the new cfg_idx */
354 cfg_idx = bnx2x_get_link_cfg_idx(bp);
355 /* Restore old config in case command failed */
356 bp->link_params.multi_phy_config = old_multi_phy_config;
357 DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
358
359 if (cmd->autoneg == AUTONEG_ENABLE) {
360 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
361 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
362 return -EINVAL;
363 }
364
365 /* advertise the requested speed and duplex if supported */
366 if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
367 DP(NETIF_MSG_LINK, "Advertisement parameters "
368 "are not supported\n");
369 return -EINVAL;
370 }
371
372 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
373 bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
374 bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
375 cmd->advertising);
376 if (cmd->advertising) {
377
378 bp->link_params.speed_cap_mask[cfg_idx] = 0;
379 if (cmd->advertising & ADVERTISED_10baseT_Half) {
380 bp->link_params.speed_cap_mask[cfg_idx] |=
381 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
382 }
383 if (cmd->advertising & ADVERTISED_10baseT_Full)
384 bp->link_params.speed_cap_mask[cfg_idx] |=
385 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
386
387 if (cmd->advertising & ADVERTISED_100baseT_Full)
388 bp->link_params.speed_cap_mask[cfg_idx] |=
389 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
390
391 if (cmd->advertising & ADVERTISED_100baseT_Half) {
392 bp->link_params.speed_cap_mask[cfg_idx] |=
393 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
394 }
395 if (cmd->advertising & ADVERTISED_1000baseT_Half) {
396 bp->link_params.speed_cap_mask[cfg_idx] |=
397 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
398 }
399 if (cmd->advertising & (ADVERTISED_1000baseT_Full |
400 ADVERTISED_1000baseKX_Full))
401 bp->link_params.speed_cap_mask[cfg_idx] |=
402 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
403
404 if (cmd->advertising & (ADVERTISED_10000baseT_Full |
405 ADVERTISED_10000baseKX4_Full |
406 ADVERTISED_10000baseKR_Full))
407 bp->link_params.speed_cap_mask[cfg_idx] |=
408 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
409 }
410 } else { /* forced speed */
411 /* advertise the requested speed and duplex if supported */
412 switch (speed) {
413 case SPEED_10:
414 if (cmd->duplex == DUPLEX_FULL) {
415 if (!(bp->port.supported[cfg_idx] &
416 SUPPORTED_10baseT_Full)) {
417 DP(NETIF_MSG_LINK,
418 "10M full not supported\n");
419 return -EINVAL;
420 }
421
422 advertising = (ADVERTISED_10baseT_Full |
423 ADVERTISED_TP);
424 } else {
425 if (!(bp->port.supported[cfg_idx] &
426 SUPPORTED_10baseT_Half)) {
427 DP(NETIF_MSG_LINK,
428 "10M half not supported\n");
429 return -EINVAL;
430 }
431
432 advertising = (ADVERTISED_10baseT_Half |
433 ADVERTISED_TP);
434 }
435 break;
436
437 case SPEED_100:
438 if (cmd->duplex == DUPLEX_FULL) {
439 if (!(bp->port.supported[cfg_idx] &
440 SUPPORTED_100baseT_Full)) {
441 DP(NETIF_MSG_LINK,
442 "100M full not supported\n");
443 return -EINVAL;
444 }
445
446 advertising = (ADVERTISED_100baseT_Full |
447 ADVERTISED_TP);
448 } else {
449 if (!(bp->port.supported[cfg_idx] &
450 SUPPORTED_100baseT_Half)) {
451 DP(NETIF_MSG_LINK,
452 "100M half not supported\n");
453 return -EINVAL;
454 }
455
456 advertising = (ADVERTISED_100baseT_Half |
457 ADVERTISED_TP);
458 }
459 break;
460
461 case SPEED_1000:
462 if (cmd->duplex != DUPLEX_FULL) {
463 DP(NETIF_MSG_LINK, "1G half not supported\n");
464 return -EINVAL;
465 }
466
467 if (!(bp->port.supported[cfg_idx] &
468 SUPPORTED_1000baseT_Full)) {
469 DP(NETIF_MSG_LINK, "1G full not supported\n");
470 return -EINVAL;
471 }
472
473 advertising = (ADVERTISED_1000baseT_Full |
474 ADVERTISED_TP);
475 break;
476
477 case SPEED_2500:
478 if (cmd->duplex != DUPLEX_FULL) {
479 DP(NETIF_MSG_LINK,
480 "2.5G half not supported\n");
481 return -EINVAL;
482 }
483
484 if (!(bp->port.supported[cfg_idx]
485 & SUPPORTED_2500baseX_Full)) {
486 DP(NETIF_MSG_LINK,
487 "2.5G full not supported\n");
488 return -EINVAL;
489 }
490
491 advertising = (ADVERTISED_2500baseX_Full |
492 ADVERTISED_TP);
493 break;
494
495 case SPEED_10000:
496 if (cmd->duplex != DUPLEX_FULL) {
497 DP(NETIF_MSG_LINK, "10G half not supported\n");
498 return -EINVAL;
499 }
500
501 if (!(bp->port.supported[cfg_idx]
502 & SUPPORTED_10000baseT_Full)) {
503 DP(NETIF_MSG_LINK, "10G full not supported\n");
504 return -EINVAL;
505 }
506
507 advertising = (ADVERTISED_10000baseT_Full |
508 ADVERTISED_FIBRE);
509 break;
510
511 default:
512 DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed);
513 return -EINVAL;
514 }
515
516 bp->link_params.req_line_speed[cfg_idx] = speed;
517 bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
518 bp->port.advertising[cfg_idx] = advertising;
519 }
520
521 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
522 DP_LEVEL " req_duplex %d advertising 0x%x\n",
523 bp->link_params.req_line_speed[cfg_idx],
524 bp->link_params.req_duplex[cfg_idx],
525 bp->port.advertising[cfg_idx]);
526
527 /* Set new config */
528 bp->link_params.multi_phy_config = new_multi_phy_config;
529 if (netif_running(dev)) {
530 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
531 bnx2x_link_set(bp);
532 }
533
534 return 0;
535}
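
/* Usage sketch (assumed interface name): the handler above backs the
 * standard ethtool speed/duplex controls, e.g. from userspace:
 *
 *	ethtool -s eth0 speed 10000 duplex full autoneg off
 *	ethtool -s eth0 autoneg on
 */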
536
537#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
538#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
539#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
540#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
541#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
542
543static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
544 const struct reg_addr *reg_info)
545{
546 if (CHIP_IS_E1(bp))
547 return IS_E1_ONLINE(reg_info->info);
548 else if (CHIP_IS_E1H(bp))
549 return IS_E1H_ONLINE(reg_info->info);
550 else if (CHIP_IS_E2(bp))
551 return IS_E2_ONLINE(reg_info->info);
552 else if (CHIP_IS_E3A0(bp))
553 return IS_E3_ONLINE(reg_info->info);
554 else if (CHIP_IS_E3B0(bp))
555 return IS_E3B0_ONLINE(reg_info->info);
556 else
557 return false;
558}
559
560/******* Paged registers info selectors ********/
561static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
562{
563 if (CHIP_IS_E2(bp))
564 return page_vals_e2;
565 else if (CHIP_IS_E3(bp))
566 return page_vals_e3;
567 else
568 return NULL;
569}
570
571static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
572{
573 if (CHIP_IS_E2(bp))
574 return PAGE_MODE_VALUES_E2;
575 else if (CHIP_IS_E3(bp))
576 return PAGE_MODE_VALUES_E3;
577 else
578 return 0;
579}
580
581static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
582{
583 if (CHIP_IS_E2(bp))
584 return page_write_regs_e2;
585 else if (CHIP_IS_E3(bp))
586 return page_write_regs_e3;
587 else
588 return NULL;
589}
590
591static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
592{
593 if (CHIP_IS_E2(bp))
594 return PAGE_WRITE_REGS_E2;
595 else if (CHIP_IS_E3(bp))
596 return PAGE_WRITE_REGS_E3;
597 else
598 return 0;
599}
600
601static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
602{
603 if (CHIP_IS_E2(bp))
604 return page_read_regs_e2;
605 else if (CHIP_IS_E3(bp))
606 return page_read_regs_e3;
607 else
608 return NULL;
609}
610
611static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
612{
613 if (CHIP_IS_E2(bp))
614 return PAGE_READ_REGS_E2;
615 else if (CHIP_IS_E3(bp))
616 return PAGE_READ_REGS_E3;
617 else
618 return 0;
619}
620
621static inline int __bnx2x_get_regs_len(struct bnx2x *bp)
622{
623 int num_pages = __bnx2x_get_page_reg_num(bp);
624 int page_write_num = __bnx2x_get_page_write_num(bp);
625 const struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp);
626 int page_read_num = __bnx2x_get_page_read_num(bp);
627 int regdump_len = 0;
628 int i, j, k;
629
630 for (i = 0; i < REGS_COUNT; i++)
631 if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
632 regdump_len += reg_addrs[i].size;
633
634 for (i = 0; i < num_pages; i++)
635 for (j = 0; j < page_write_num; j++)
636 for (k = 0; k < page_read_num; k++)
637 if (bnx2x_is_reg_online(bp, &page_read_addr[k]))
638 regdump_len += page_read_addr[k].size;
639
640 return regdump_len;
641}
642
643static int bnx2x_get_regs_len(struct net_device *dev)
644{
645 struct bnx2x *bp = netdev_priv(dev);
646 int regdump_len = 0;
647
648 regdump_len = __bnx2x_get_regs_len(bp);
649 regdump_len *= 4;
650 regdump_len += sizeof(struct dump_hdr);
651
652 return regdump_len;
653}
654
655/**
656 * bnx2x_read_pages_regs - read "paged" registers
657 *
658 * @bp:	device handle
659 * @p:		output buffer
660 *
661 * Reads "paged" memories: memories that may only be read by first writing to a
662 * specific address ("write address") and then reading from a specific address
663 * ("read address"). There may be more than one write address per "page" and
664 * more than one read address per write address.
665 */
666static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
667{
668 u32 i, j, k, n;
669 /* addresses of the paged registers */
670 const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
671 /* number of paged registers */
672 int num_pages = __bnx2x_get_page_reg_num(bp);
673 /* write addresses */
674 const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
675 /* number of write addresses */
676 int write_num = __bnx2x_get_page_write_num(bp);
677 /* read addresses info */
678 const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
679 /* number of read addresses */
680 int read_num = __bnx2x_get_page_read_num(bp);
681
682 for (i = 0; i < num_pages; i++) {
683 for (j = 0; j < write_num; j++) {
684 REG_WR(bp, write_addr[j], page_addr[i]);
685 for (k = 0; k < read_num; k++)
686 if (bnx2x_is_reg_online(bp, &read_addr[k]))
687 for (n = 0; n <
688 read_addr[k].size; n++)
689 *p++ = REG_RD(bp,
690 read_addr[k].addr + n*4);
691 }
692 }
693}
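
/* Worked example, derived from the E2 tables in bnx2x_dump.h above:
 * page_vals_e2 = {0, 128} with a single write register (328476), so the
 * loop writes each page value in turn and dumps the 4608-dword block at
 * 0x58000 once per page value - 2 * 4608 dwords in total.
 */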
694
695static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
696{
697 u32 i, j;
698
699 /* Read the regular registers */
700 for (i = 0; i < REGS_COUNT; i++)
701 if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
702 for (j = 0; j < reg_addrs[i].size; j++)
703 *p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
704
705 	/* Read the "paged" registers */
706 bnx2x_read_pages_regs(bp, p);
707}
708
709static void bnx2x_get_regs(struct net_device *dev,
710 struct ethtool_regs *regs, void *_p)
711{
712 u32 *p = _p;
713 struct bnx2x *bp = netdev_priv(dev);
714 struct dump_hdr dump_hdr = {0};
715
716 regs->version = 0;
717 memset(p, 0, regs->len);
718
719 if (!netif_running(bp->dev))
720 return;
721
722 	/* Disable parity attentions while dumping, since reading
723 	 * never-written registers may cause false alarms. We will
724 	 * re-enable parity attentions right after the dump.
725 */
726 bnx2x_disable_blocks_parity(bp);
727
728 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
729 dump_hdr.dump_sign = dump_sign_all;
730 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
731 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
732 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
733 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
734
735 if (CHIP_IS_E1(bp))
736 dump_hdr.info = RI_E1_ONLINE;
737 else if (CHIP_IS_E1H(bp))
738 dump_hdr.info = RI_E1H_ONLINE;
739 else if (!CHIP_IS_E1x(bp))
740 dump_hdr.info = RI_E2_ONLINE |
741 (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
742
743 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
744 p += dump_hdr.hdr_size + 1;
745
746 /* Actually read the registers */
747 __bnx2x_get_regs(bp, p);
748
749 /* Re-enable parity attentions */
750 bnx2x_clear_blocks_parity(bp);
751 bnx2x_enable_blocks_parity(bp);
752}
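
/* Note (derived from the code above): hdr_size is stored as
 * (sizeof(struct dump_hdr) / 4) - 1, so advancing p by hdr_size + 1
 * dwords skips exactly one full header before the register data.
 */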
753
754static void bnx2x_get_drvinfo(struct net_device *dev,
755 struct ethtool_drvinfo *info)
756{
757 struct bnx2x *bp = netdev_priv(dev);
758 u8 phy_fw_ver[PHY_FW_VER_LEN];
759
760 strcpy(info->driver, DRV_MODULE_NAME);
761 strcpy(info->version, DRV_MODULE_VERSION);
762
763 phy_fw_ver[0] = '\0';
764 if (bp->port.pmf) {
765 bnx2x_acquire_phy_lock(bp);
766 bnx2x_get_ext_phy_fw_version(&bp->link_params,
767 (bp->state != BNX2X_STATE_CLOSED),
768 phy_fw_ver, PHY_FW_VER_LEN);
769 bnx2x_release_phy_lock(bp);
770 }
771
772 strncpy(info->fw_version, bp->fw_ver, 32);
773 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
774 "bc %d.%d.%d%s%s",
775 (bp->common.bc_ver & 0xff0000) >> 16,
776 (bp->common.bc_ver & 0xff00) >> 8,
777 (bp->common.bc_ver & 0xff),
778 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
779 strcpy(info->bus_info, pci_name(bp->pdev));
780 info->n_stats = BNX2X_NUM_STATS;
781 info->testinfo_len = BNX2X_NUM_TESTS;
782 info->eedump_len = bp->common.flash_size;
783 info->regdump_len = bnx2x_get_regs_len(dev);
784}
785
786static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
787{
788 struct bnx2x *bp = netdev_priv(dev);
789
790 if (bp->flags & NO_WOL_FLAG) {
791 wol->supported = 0;
792 wol->wolopts = 0;
793 } else {
794 wol->supported = WAKE_MAGIC;
795 if (bp->wol)
796 wol->wolopts = WAKE_MAGIC;
797 else
798 wol->wolopts = 0;
799 }
800 memset(&wol->sopass, 0, sizeof(wol->sopass));
801}
802
803static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
804{
805 struct bnx2x *bp = netdev_priv(dev);
806
807 if (wol->wolopts & ~WAKE_MAGIC)
808 return -EINVAL;
809
810 if (wol->wolopts & WAKE_MAGIC) {
811 if (bp->flags & NO_WOL_FLAG)
812 return -EINVAL;
813
814 bp->wol = 1;
815 } else
816 bp->wol = 0;
817
818 return 0;
819}
820
821static u32 bnx2x_get_msglevel(struct net_device *dev)
822{
823 struct bnx2x *bp = netdev_priv(dev);
824
825 return bp->msg_enable;
826}
827
828static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
829{
830 struct bnx2x *bp = netdev_priv(dev);
831
832 if (capable(CAP_NET_ADMIN)) {
833 /* dump MCP trace */
834 if (level & BNX2X_MSG_MCP)
835 bnx2x_fw_dump_lvl(bp, KERN_INFO);
836 bp->msg_enable = level;
837 }
838}
839
840static int bnx2x_nway_reset(struct net_device *dev)
841{
842 struct bnx2x *bp = netdev_priv(dev);
843
844 if (!bp->port.pmf)
845 return 0;
846
847 if (netif_running(dev)) {
848 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
849 bnx2x_link_set(bp);
850 }
851
852 return 0;
853}
854
855static u32 bnx2x_get_link(struct net_device *dev)
856{
857 struct bnx2x *bp = netdev_priv(dev);
858
859 if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
860 return 0;
861
862 return bp->link_vars.link_up;
863}
864
865static int bnx2x_get_eeprom_len(struct net_device *dev)
866{
867 struct bnx2x *bp = netdev_priv(dev);
868
869 return bp->common.flash_size;
870}
871
872static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
873{
874 int port = BP_PORT(bp);
875 int count, i;
876 u32 val = 0;
877
878 /* adjust timeout for emulation/FPGA */
879 count = BNX2X_NVRAM_TIMEOUT_COUNT;
880 if (CHIP_REV_IS_SLOW(bp))
881 count *= 100;
882
883 /* request access to nvram interface */
884 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
885 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
886
887 for (i = 0; i < count*10; i++) {
888 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
889 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
890 break;
891
892 udelay(5);
893 }
894
895 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
896 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
897 return -EBUSY;
898 }
899
900 return 0;
901}
902
903static int bnx2x_release_nvram_lock(struct bnx2x *bp)
904{
905 int port = BP_PORT(bp);
906 int count, i;
907 u32 val = 0;
908
909 /* adjust timeout for emulation/FPGA */
910 count = BNX2X_NVRAM_TIMEOUT_COUNT;
911 if (CHIP_REV_IS_SLOW(bp))
912 count *= 100;
913
914 /* relinquish nvram interface */
915 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
916 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
917
918 for (i = 0; i < count*10; i++) {
919 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
920 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
921 break;
922
923 udelay(5);
924 }
925
926 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
927 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
928 return -EBUSY;
929 }
930
931 return 0;
932}
933
934static void bnx2x_enable_nvram_access(struct bnx2x *bp)
935{
936 u32 val;
937
938 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
939
940 /* enable both bits, even on read */
941 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
942 (val | MCPR_NVM_ACCESS_ENABLE_EN |
943 MCPR_NVM_ACCESS_ENABLE_WR_EN));
944}
945
946static void bnx2x_disable_nvram_access(struct bnx2x *bp)
947{
948 u32 val;
949
950 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
951
952 /* disable both bits, even after read */
953 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
954 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
955 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
956}
957
958static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
959 u32 cmd_flags)
960{
961 int count, i, rc;
962 u32 val;
963
964 /* build the command word */
965 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
966
967 /* need to clear DONE bit separately */
968 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
969
970 /* address of the NVRAM to read from */
971 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
972 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
973
974 /* issue a read command */
975 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
976
977 /* adjust timeout for emulation/FPGA */
978 count = BNX2X_NVRAM_TIMEOUT_COUNT;
979 if (CHIP_REV_IS_SLOW(bp))
980 count *= 100;
981
982 /* wait for completion */
983 *ret_val = 0;
984 rc = -EBUSY;
985 for (i = 0; i < count; i++) {
986 udelay(5);
987 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
988
989 if (val & MCPR_NVM_COMMAND_DONE) {
990 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
991 			/* we read nvram data in cpu order
992 			 * but ethtool sees it as an array of bytes -
993 			 * converting to big-endian does the work */
994 *ret_val = cpu_to_be32(val);
995 rc = 0;
996 break;
997 }
998 }
999
1000 return rc;
1001}
1002
1003static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
1004 int buf_size)
1005{
1006 int rc;
1007 u32 cmd_flags;
1008 __be32 val;
1009
1010 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1011 DP(BNX2X_MSG_NVM,
1012 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
1013 offset, buf_size);
1014 return -EINVAL;
1015 }
1016
1017 if (offset + buf_size > bp->common.flash_size) {
1018 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
1019 " buf_size (0x%x) > flash_size (0x%x)\n",
1020 offset, buf_size, bp->common.flash_size);
1021 return -EINVAL;
1022 }
1023
1024 /* request access to nvram interface */
1025 rc = bnx2x_acquire_nvram_lock(bp);
1026 if (rc)
1027 return rc;
1028
1029 /* enable access to nvram interface */
1030 bnx2x_enable_nvram_access(bp);
1031
1032 /* read the first word(s) */
1033 cmd_flags = MCPR_NVM_COMMAND_FIRST;
1034 while ((buf_size > sizeof(u32)) && (rc == 0)) {
1035 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
1036 memcpy(ret_buf, &val, 4);
1037
1038 /* advance to the next dword */
1039 offset += sizeof(u32);
1040 ret_buf += sizeof(u32);
1041 buf_size -= sizeof(u32);
1042 cmd_flags = 0;
1043 }
1044
1045 if (rc == 0) {
1046 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1047 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
1048 memcpy(ret_buf, &val, 4);
1049 }
1050
1051 /* disable access to nvram interface */
1052 bnx2x_disable_nvram_access(bp);
1053 bnx2x_release_nvram_lock(bp);
1054
1055 return rc;
1056}
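
/* Usage sketch (hypothetical offset and length): reading 8 bytes from a
 * dword-aligned offset issues two dword reads inside a single lock/enable
 * window - the first flagged MCPR_NVM_COMMAND_FIRST, the last flagged
 * MCPR_NVM_COMMAND_LAST:
 *
 *	u8 buf[8];
 *	int rc = bnx2x_nvram_read(bp, 0x100, buf, sizeof(buf));
 */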
1057
1058static int bnx2x_get_eeprom(struct net_device *dev,
1059 struct ethtool_eeprom *eeprom, u8 *eebuf)
1060{
1061 struct bnx2x *bp = netdev_priv(dev);
1062 int rc;
1063
1064 if (!netif_running(dev))
1065 return -EAGAIN;
1066
1067 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
1068 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
1069 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
1070 eeprom->len, eeprom->len);
1071
1072 /* parameters already validated in ethtool_get_eeprom */
1073
1074 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
1075
1076 return rc;
1077}
1078
1079static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
1080 u32 cmd_flags)
1081{
1082 int count, i, rc;
1083
1084 /* build the command word */
1085 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
1086
1087 /* need to clear DONE bit separately */
1088 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1089
1090 /* write the data */
1091 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
1092
1093 /* address of the NVRAM to write to */
1094 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
1095 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1096
1097 /* issue the write command */
1098 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1099
1100 /* adjust timeout for emulation/FPGA */
1101 count = BNX2X_NVRAM_TIMEOUT_COUNT;
1102 if (CHIP_REV_IS_SLOW(bp))
1103 count *= 100;
1104
1105 /* wait for completion */
1106 rc = -EBUSY;
1107 for (i = 0; i < count; i++) {
1108 udelay(5);
1109 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
1110 if (val & MCPR_NVM_COMMAND_DONE) {
1111 rc = 0;
1112 break;
1113 }
1114 }
1115
1116 return rc;
1117}
1118
1119#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
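/* e.g. (illustrative) offset 0x103: 0x103 & 0x03 == 3, so BYTE_OFFSET()
 * yields a shift of 24 bits within the containing 32-bit word */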
1120
1121static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
1122 int buf_size)
1123{
1124 int rc;
1125 u32 cmd_flags;
1126 u32 align_offset;
1127 __be32 val;
1128
1129 if (offset + buf_size > bp->common.flash_size) {
1130 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
1131 " buf_size (0x%x) > flash_size (0x%x)\n",
1132 offset, buf_size, bp->common.flash_size);
1133 return -EINVAL;
1134 }
1135
1136 /* request access to nvram interface */
1137 rc = bnx2x_acquire_nvram_lock(bp);
1138 if (rc)
1139 return rc;
1140
1141 /* enable access to nvram interface */
1142 bnx2x_enable_nvram_access(bp);
1143
1144 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1145 align_offset = (offset & ~0x03);
1146 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
1147
1148 if (rc == 0) {
1149 val &= ~(0xff << BYTE_OFFSET(offset));
1150 val |= (*data_buf << BYTE_OFFSET(offset));
1151
1152 /* nvram data is returned as an array of bytes
1153 * convert it back to cpu order */
1154 val = be32_to_cpu(val);
1155
1156 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
1157 cmd_flags);
1158 }
1159
1160 /* disable access to nvram interface */
1161 bnx2x_disable_nvram_access(bp);
1162 bnx2x_release_nvram_lock(bp);
1163
1164 return rc;
1165}
1166
1167static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
1168 int buf_size)
1169{
1170 int rc;
1171 u32 cmd_flags;
1172 u32 val;
1173 u32 written_so_far;
1174
1175 if (buf_size == 1) /* ethtool */
1176 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
1177
1178 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1179 DP(BNX2X_MSG_NVM,
1180 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
1181 offset, buf_size);
1182 return -EINVAL;
1183 }
1184
1185 if (offset + buf_size > bp->common.flash_size) {
1186 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
1187 " buf_size (0x%x) > flash_size (0x%x)\n",
1188 offset, buf_size, bp->common.flash_size);
1189 return -EINVAL;
1190 }
1191
1192 /* request access to nvram interface */
1193 rc = bnx2x_acquire_nvram_lock(bp);
1194 if (rc)
1195 return rc;
1196
1197 /* enable access to nvram interface */
1198 bnx2x_enable_nvram_access(bp);
1199
1200 written_so_far = 0;
1201 cmd_flags = MCPR_NVM_COMMAND_FIRST;
1202 while ((written_so_far < buf_size) && (rc == 0)) {
1203 if (written_so_far == (buf_size - sizeof(u32)))
1204 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1205 else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
1206 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1207 else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
1208 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1209
1210 memcpy(&val, data_buf, 4);
1211
1212 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
1213
1214 /* advance to the next dword */
1215 offset += sizeof(u32);
1216 data_buf += sizeof(u32);
1217 written_so_far += sizeof(u32);
1218 cmd_flags = 0;
1219 }
1220
1221 /* disable access to nvram interface */
1222 bnx2x_disable_nvram_access(bp);
1223 bnx2x_release_nvram_lock(bp);
1224
1225 return rc;
1226}
1227
1228static int bnx2x_set_eeprom(struct net_device *dev,
1229 struct ethtool_eeprom *eeprom, u8 *eebuf)
1230{
1231 struct bnx2x *bp = netdev_priv(dev);
1232 int port = BP_PORT(bp);
1233 int rc = 0;
1234 u32 ext_phy_config;
1235 if (!netif_running(dev))
1236 return -EAGAIN;
1237
1238 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
1239 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
1240 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
1241 eeprom->len, eeprom->len);
1242
1243 /* parameters already validated in ethtool_set_eeprom */
1244
1245 /* PHY eeprom can be accessed only by the PMF */
1246 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
1247 !bp->port.pmf)
1248 return -EINVAL;
1249
1250 ext_phy_config =
1251 SHMEM_RD(bp,
1252 dev_info.port_hw_config[port].external_phy_config);
1253
1254 if (eeprom->magic == 0x50485950) {
1255 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
1256 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1257
1258 bnx2x_acquire_phy_lock(bp);
1259 rc |= bnx2x_link_reset(&bp->link_params,
1260 &bp->link_vars, 0);
1261 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
1262 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
1263 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
1264 MISC_REGISTERS_GPIO_HIGH, port);
1265 bnx2x_release_phy_lock(bp);
1266 bnx2x_link_report(bp);
1267
1268 } else if (eeprom->magic == 0x50485952) {
1269 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
1270 if (bp->state == BNX2X_STATE_OPEN) {
1271 bnx2x_acquire_phy_lock(bp);
1272 rc |= bnx2x_link_reset(&bp->link_params,
1273 &bp->link_vars, 1);
1274
1275 rc |= bnx2x_phy_init(&bp->link_params,
1276 &bp->link_vars);
1277 bnx2x_release_phy_lock(bp);
1278 bnx2x_calc_fc_adv(bp);
1279 }
1280 } else if (eeprom->magic == 0x53985943) {
1281 		/* 'PHYC' (0x50485943): PHY FW upgrade completed */
1282 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
1283 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
1284
1285 /* DSP Remove Download Mode */
1286 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
1287 MISC_REGISTERS_GPIO_LOW, port);
1288
1289 bnx2x_acquire_phy_lock(bp);
1290
1291 bnx2x_sfx7101_sp_sw_reset(bp,
1292 &bp->link_params.phy[EXT_PHY1]);
1293
1294 /* wait 0.5 sec to allow it to run */
1295 msleep(500);
1296 bnx2x_ext_phy_hw_reset(bp, port);
1297 msleep(500);
1298 bnx2x_release_phy_lock(bp);
1299 }
1300 } else
1301 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
1302
1303 return rc;
1304}
1305
1306static int bnx2x_get_coalesce(struct net_device *dev,
1307 struct ethtool_coalesce *coal)
1308{
1309 struct bnx2x *bp = netdev_priv(dev);
1310
1311 memset(coal, 0, sizeof(struct ethtool_coalesce));
1312
1313 coal->rx_coalesce_usecs = bp->rx_ticks;
1314 coal->tx_coalesce_usecs = bp->tx_ticks;
1315
1316 return 0;
1317}
1318
1319static int bnx2x_set_coalesce(struct net_device *dev,
1320 struct ethtool_coalesce *coal)
1321{
1322 struct bnx2x *bp = netdev_priv(dev);
1323
1324 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
1325 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
1326 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
1327
1328 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
1329 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
1330 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
1331
1332 if (netif_running(dev))
1333 bnx2x_update_coalesce(bp);
1334
1335 return 0;
1336}
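
/* Usage sketch (assumed interface name): these handlers back the standard
 * ethtool coalescing controls, with values clamped to
 * BNX2X_MAX_COALESCE_TOUT, e.g.:
 *
 *	ethtool -C eth0 rx-usecs 50 tx-usecs 100
 */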
1337
1338static void bnx2x_get_ringparam(struct net_device *dev,
1339 struct ethtool_ringparam *ering)
1340{
1341 struct bnx2x *bp = netdev_priv(dev);
1342
1343 ering->rx_max_pending = MAX_RX_AVAIL;
1344 ering->rx_mini_max_pending = 0;
1345 ering->rx_jumbo_max_pending = 0;
1346
1347 if (bp->rx_ring_size)
1348 ering->rx_pending = bp->rx_ring_size;
1349 else
1350 ering->rx_pending = MAX_RX_AVAIL;
1351
1352 ering->rx_mini_pending = 0;
1353 ering->rx_jumbo_pending = 0;
1354
1355 ering->tx_max_pending = MAX_TX_AVAIL;
1356 ering->tx_pending = bp->tx_ring_size;
1357}
1358
1359static int bnx2x_set_ringparam(struct net_device *dev,
1360 struct ethtool_ringparam *ering)
1361{
1362 struct bnx2x *bp = netdev_priv(dev);
1363
1364 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1365 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
1366 return -EAGAIN;
1367 }
1368
1369 if ((ering->rx_pending > MAX_RX_AVAIL) ||
1370 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
1371 MIN_RX_SIZE_TPA)) ||
1372 (ering->tx_pending > MAX_TX_AVAIL) ||
1373 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
1374 return -EINVAL;
1375
1376 bp->rx_ring_size = ering->rx_pending;
1377 bp->tx_ring_size = ering->tx_pending;
1378
1379 return bnx2x_reload_if_running(dev);
1380}
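
/* Usage sketch (assumed interface name, hypothetical ring sizes): the new
 * sizes are validated against the MIN/MAX bounds above and applied via a
 * reload when the device is running, e.g.:
 *
 *	ethtool -G eth0 rx 4078 tx 4078
 */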
1381
1382static void bnx2x_get_pauseparam(struct net_device *dev,
1383 struct ethtool_pauseparam *epause)
1384{
1385 struct bnx2x *bp = netdev_priv(dev);
1386 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
1387 epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
1388 BNX2X_FLOW_CTRL_AUTO);
1389
1390 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
1391 BNX2X_FLOW_CTRL_RX);
1392 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
1393 BNX2X_FLOW_CTRL_TX);
1394
1395 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
1396 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
1397 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
1398}
1399
1400static int bnx2x_set_pauseparam(struct net_device *dev,
1401 struct ethtool_pauseparam *epause)
1402{
1403 struct bnx2x *bp = netdev_priv(dev);
1404 u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1405 if (IS_MF(bp))
1406 return 0;
1407
1408 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
1409 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
1410 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
1411
1412 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
1413
1414 if (epause->rx_pause)
1415 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
1416
1417 if (epause->tx_pause)
1418 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
1419
1420 if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
1421 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
1422
1423 if (epause->autoneg) {
1424 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
1425 DP(NETIF_MSG_LINK, "autoneg not supported\n");
1426 return -EINVAL;
1427 }
1428
1429 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
1430 bp->link_params.req_flow_ctrl[cfg_idx] =
1431 BNX2X_FLOW_CTRL_AUTO;
1432 }
1433 }
1434
1435 DP(NETIF_MSG_LINK,
1436 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
1437
1438 if (netif_running(dev)) {
1439 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1440 bnx2x_link_set(bp);
1441 }
1442
1443 return 0;
1444}
1445
1446static const struct {
1447 char string[ETH_GSTRING_LEN];
1448} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
1449 { "register_test (offline)" },
1450 { "memory_test (offline)" },
1451 { "loopback_test (offline)" },
1452 { "nvram_test (online)" },
1453 { "interrupt_test (online)" },
1454 { "link_test (online)" },
1455 { "idle check (online)" }
1456};
1457
1458enum {
1459 BNX2X_CHIP_E1_OFST = 0,
1460 BNX2X_CHIP_E1H_OFST,
1461 BNX2X_CHIP_E2_OFST,
1462 BNX2X_CHIP_E3_OFST,
1463 BNX2X_CHIP_E3B0_OFST,
1464 BNX2X_CHIP_MAX_OFST
1465};
1466
1467#define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST)
1468#define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST)
1469#define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST)
1470#define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST)
1471#define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST)
1472
1473#define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1)
1474#define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
1475
1476static int bnx2x_test_registers(struct bnx2x *bp)
1477{
1478 int idx, i, rc = -ENODEV;
1479 u32 wr_val = 0, hw;
1480 int port = BP_PORT(bp);
1481 static const struct {
1482 u32 hw;
1483 u32 offset0;
1484 u32 offset1;
1485 u32 mask;
1486 } reg_tbl[] = {
1487/* 0 */ { BNX2X_CHIP_MASK_ALL,
1488 BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
1489 { BNX2X_CHIP_MASK_ALL,
1490 DORQ_REG_DB_ADDR0, 4, 0xffffffff },
1491 { BNX2X_CHIP_MASK_E1X,
1492 HC_REG_AGG_INT_0, 4, 0x000003ff },
1493 { BNX2X_CHIP_MASK_ALL,
1494 PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
1495 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
1496 PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
1497 { BNX2X_CHIP_MASK_E3B0,
1498 PBF_REG_INIT_CRD_Q0, 4, 0x000007ff },
1499 { BNX2X_CHIP_MASK_ALL,
1500 PRS_REG_CID_PORT_0, 4, 0x00ffffff },
1501 { BNX2X_CHIP_MASK_ALL,
1502 PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
1503 { BNX2X_CHIP_MASK_ALL,
1504 PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
1505 { BNX2X_CHIP_MASK_ALL,
1506 PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
1507/* 10 */ { BNX2X_CHIP_MASK_ALL,
1508 PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
1509 { BNX2X_CHIP_MASK_ALL,
1510 PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
1511 { BNX2X_CHIP_MASK_ALL,
1512 QM_REG_CONNNUM_0, 4, 0x000fffff },
1513 { BNX2X_CHIP_MASK_ALL,
1514 TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
1515 { BNX2X_CHIP_MASK_ALL,
1516 SRC_REG_KEYRSS0_0, 40, 0xffffffff },
1517 { BNX2X_CHIP_MASK_ALL,
1518 SRC_REG_KEYRSS0_7, 40, 0xffffffff },
1519 { BNX2X_CHIP_MASK_ALL,
1520 XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
1521 { BNX2X_CHIP_MASK_ALL,
1522 XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
1523 { BNX2X_CHIP_MASK_ALL,
1524 XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
1525 { BNX2X_CHIP_MASK_ALL,
1526 NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
1527/* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
1528 NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
1529 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
1530 NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
1531 { BNX2X_CHIP_MASK_ALL,
1532 NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
1533 { BNX2X_CHIP_MASK_ALL,
1534 NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
1535 { BNX2X_CHIP_MASK_ALL,
1536 NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
1537 { BNX2X_CHIP_MASK_ALL,
1538 NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
1539 { BNX2X_CHIP_MASK_ALL,
1540 NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
1541 { BNX2X_CHIP_MASK_ALL,
1542 NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
1543 { BNX2X_CHIP_MASK_ALL,
1544 NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
1545 { BNX2X_CHIP_MASK_ALL,
1546 NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
1547/* 30 */ { BNX2X_CHIP_MASK_ALL,
1548 NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
1549 { BNX2X_CHIP_MASK_ALL,
1550 NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
1551 { BNX2X_CHIP_MASK_ALL,
1552 NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
1553 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
1554 NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
1555 { BNX2X_CHIP_MASK_ALL,
1556 NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
1557 { BNX2X_CHIP_MASK_ALL,
1558 NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
1559 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
1560 NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
1561 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
1562 NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
1563
1564 { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
1565 };
1566
1567 if (!netif_running(bp->dev))
1568 return rc;
1569
1570 if (CHIP_IS_E1(bp))
1571 hw = BNX2X_CHIP_MASK_E1;
1572 else if (CHIP_IS_E1H(bp))
1573 hw = BNX2X_CHIP_MASK_E1H;
1574 else if (CHIP_IS_E2(bp))
1575 hw = BNX2X_CHIP_MASK_E2;
1576 else if (CHIP_IS_E3B0(bp))
1577 hw = BNX2X_CHIP_MASK_E3B0;
1578 else /* e3 A0 */
1579 hw = BNX2X_CHIP_MASK_E3;
1580
1581 	/* Run the test twice:
1582 	   first by writing 0x00000000, then by writing 0xffffffff */
1583 for (idx = 0; idx < 2; idx++) {
1584
1585 switch (idx) {
1586 case 0:
1587 wr_val = 0;
1588 break;
1589 case 1:
1590 wr_val = 0xffffffff;
1591 break;
1592 }
1593
1594 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
1595 u32 offset, mask, save_val, val;
1596 if (!(hw & reg_tbl[i].hw))
1597 continue;
1598
1599 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
1600 mask = reg_tbl[i].mask;
1601
1602 save_val = REG_RD(bp, offset);
1603
1604 REG_WR(bp, offset, wr_val & mask);
1605
1606 val = REG_RD(bp, offset);
1607
1608 /* Restore the original register's value */
1609 REG_WR(bp, offset, save_val);
1610
1611 /* verify value is as expected */
1612 if ((val & mask) != (wr_val & mask)) {
1613 DP(NETIF_MSG_HW,
1614 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
1615 offset, val, wr_val, mask);
1616 goto test_reg_exit;
1617 }
1618 }
1619 }
1620
1621 rc = 0;
1622
1623test_reg_exit:
1624 return rc;
1625}
1626
1627static int bnx2x_test_memory(struct bnx2x *bp)
1628{
1629 int i, j, rc = -ENODEV;
1630 u32 val, index;
1631 static const struct {
1632 u32 offset;
1633 int size;
1634 } mem_tbl[] = {
1635 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
1636 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
1637 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
1638 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
1639 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
1640 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
1641 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
1642
1643 { 0xffffffff, 0 }
1644 };
1645
1646 static const struct {
1647 char *name;
1648 u32 offset;
1649 u32 hw_mask[BNX2X_CHIP_MAX_OFST];
1650 } prty_tbl[] = {
1651 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS,
1652 {0x3ffc0, 0, 0, 0} },
1653 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS,
1654 {0x2, 0x2, 0, 0} },
1655 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
1656 {0, 0, 0, 0} },
1657 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS,
1658 {0x3ffc0, 0, 0, 0} },
1659 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS,
1660 {0x3ffc0, 0, 0, 0} },
1661 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS,
1662 {0x3ffc1, 0, 0, 0} },
1663
1664 { NULL, 0xffffffff, {0, 0, 0, 0} }
1665 };
1666
1667 if (!netif_running(bp->dev))
1668 return rc;
1669
1670 if (CHIP_IS_E1(bp))
1671 index = BNX2X_CHIP_E1_OFST;
1672 else if (CHIP_IS_E1H(bp))
1673 index = BNX2X_CHIP_E1H_OFST;
1674 else if (CHIP_IS_E2(bp))
1675 index = BNX2X_CHIP_E2_OFST;
1676 else /* e3 */
1677 index = BNX2X_CHIP_E3_OFST;
1678
1679 	/* pre-check the parity status */
1680 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
1681 val = REG_RD(bp, prty_tbl[i].offset);
1682 if (val & ~(prty_tbl[i].hw_mask[index])) {
1683 DP(NETIF_MSG_HW,
1684 "%s is 0x%x\n", prty_tbl[i].name, val);
1685 goto test_mem_exit;
1686 }
1687 }
1688
1689 /* Go through all the memories */
1690 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
1691 for (j = 0; j < mem_tbl[i].size; j++)
1692 REG_RD(bp, mem_tbl[i].offset + j*4);
1693
1694 /* Check the parity status */
1695 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
1696 val = REG_RD(bp, prty_tbl[i].offset);
1697 if (val & ~(prty_tbl[i].hw_mask[index])) {
1698 DP(NETIF_MSG_HW,
1699 "%s is 0x%x\n", prty_tbl[i].name, val);
1700 goto test_mem_exit;
1701 }
1702 }
1703
1704 rc = 0;
1705
1706test_mem_exit:
1707 return rc;
1708}
1709
1710static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
1711{
1712 int cnt = 1400;
1713
1714 if (link_up) {
1715 while (bnx2x_link_test(bp, is_serdes) && cnt--)
1716 msleep(20);
1717
1718 if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
1719 DP(NETIF_MSG_LINK, "Timeout waiting for link up\n");
1720 }
1721}
1722
1723static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
1724{
1725 unsigned int pkt_size, num_pkts, i;
1726 struct sk_buff *skb;
1727 unsigned char *packet;
1728 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
1729 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
1730 struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
1731 u16 tx_start_idx, tx_idx;
1732 u16 rx_start_idx, rx_idx;
1733 u16 pkt_prod, bd_prod, rx_comp_cons;
1734 struct sw_tx_bd *tx_buf;
1735 struct eth_tx_start_bd *tx_start_bd;
1736 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1737 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1738 dma_addr_t mapping;
1739 union eth_rx_cqe *cqe;
1740 u8 cqe_fp_flags, cqe_fp_type;
1741 struct sw_rx_bd *rx_buf;
1742 u16 len;
1743 int rc = -ENODEV;
1744
1745 /* check the loopback mode */
1746 switch (loopback_mode) {
1747 case BNX2X_PHY_LOOPBACK:
1748 if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
1749 return -EINVAL;
1750 break;
1751 case BNX2X_MAC_LOOPBACK:
1752 bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
1753 LOOPBACK_XMAC : LOOPBACK_BMAC;
1754 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1755 break;
1756 default:
1757 return -EINVAL;
1758 }
1759
1760 /* prepare the loopback packet */
1761 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
1762 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
1763 skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
1764 if (!skb) {
1765 rc = -ENOMEM;
1766 goto test_loopback_exit;
1767 }
1768 packet = skb_put(skb, pkt_size);
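	/* Frame layout: DA = our own MAC (so the looped-back frame is
	 * accepted), SA = zeroes, rest of the header 0x77, payload an
	 * incrementing byte pattern that is verified on the RX side below.
	 */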
1769 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
1770 memset(packet + ETH_ALEN, 0, ETH_ALEN);
1771 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
1772 for (i = ETH_HLEN; i < pkt_size; i++)
1773 packet[i] = (unsigned char) (i & 0xff);
1774 mapping = dma_map_single(&bp->pdev->dev, skb->data,
1775 skb_headlen(skb), DMA_TO_DEVICE);
1776 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1777 rc = -ENOMEM;
1778 dev_kfree_skb(skb);
1779 BNX2X_ERR("Unable to map SKB\n");
1780 goto test_loopback_exit;
1781 }
1782
1783 /* send the loopback packet */
1784 num_pkts = 0;
1785 tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
1786 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
1787
1788 pkt_prod = txdata->tx_pkt_prod++;
1789 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
1790 tx_buf->first_bd = txdata->tx_bd_prod;
1791 tx_buf->skb = skb;
1792 tx_buf->flags = 0;
1793
1794 bd_prod = TX_BD(txdata->tx_bd_prod);
1795 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
1796 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1797 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1798 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
1799 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
1800 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
1801 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1802 SET_FLAG(tx_start_bd->general_data,
1803 ETH_TX_START_BD_ETH_ADDR_TYPE,
1804 UNICAST_ADDRESS);
1805 SET_FLAG(tx_start_bd->general_data,
1806 ETH_TX_START_BD_HDR_NBDS,
1807 1);
1808
1809 /* turn on parsing and get a BD */
1810 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1811
1812 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
1813 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
1814
1815 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
1816 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
1817
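	/* Order all BD writes before the producer update: wmb() makes the
	 * descriptors visible in host memory before the doorbell below
	 * tells the chip a new packet is queued.
	 */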
1818 wmb();
1819
1820 txdata->tx_db.data.prod += 2;
1821 barrier();
1822 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
1823
1824 mmiowb();
1825 barrier();
1826
1827 num_pkts++;
1828 txdata->tx_bd_prod += 2; /* start + pbd */
1829
1830 udelay(100);
1831
1832 tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
1833 if (tx_idx != tx_start_idx + num_pkts)
1834 goto test_loopback_exit;
1835
1836	/* Unlike the HC, the IGU won't generate an interrupt for status
1837	 * block updates that were performed while interrupts were
1838	 * disabled.
1839 */
1840 if (bp->common.int_block == INT_BLOCK_IGU) {
1841		/* Disable local BHs to prevent a deadlock between
1842 * sch_direct_xmit() and bnx2x_run_loopback() (calling
1843 * bnx2x_tx_int()), as both are taking netif_tx_lock().
1844 */
1845 local_bh_disable();
1846 bnx2x_tx_int(bp, txdata);
1847 local_bh_enable();
1848 }
1849
1850 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
1851 if (rx_idx != rx_start_idx + num_pkts)
1852 goto test_loopback_exit;
1853
1854 rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons);
1855 cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)];
1856 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1857 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
1858 if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
1859 goto test_loopback_rx_exit;
1860
1861 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1862 if (len != pkt_size)
1863 goto test_loopback_rx_exit;
1864
1865 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
1866 dma_sync_single_for_cpu(&bp->pdev->dev,
1867 dma_unmap_addr(rx_buf, mapping),
1868 fp_rx->rx_buf_size, DMA_FROM_DEVICE);
1869 skb = rx_buf->skb;
1870 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
1871 for (i = ETH_HLEN; i < pkt_size; i++)
1872 if (*(skb->data + i) != (unsigned char) (i & 0xff))
1873 goto test_loopback_rx_exit;
1874
1875 rc = 0;
1876
1877test_loopback_rx_exit:
1878
1879 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
1880 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
1881 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
1882 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
1883
1884 /* Update producers */
1885 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
1886 fp_rx->rx_sge_prod);
1887
1888test_loopback_exit:
1889 bp->link_params.loopback_mode = LOOPBACK_NONE;
1890
1891 return rc;
1892}
1893
1894static int bnx2x_test_loopback(struct bnx2x *bp)
1895{
1896 int rc = 0, res;
1897
1898 if (BP_NOMCP(bp))
1899 return rc;
1900
1901 if (!netif_running(bp->dev))
1902 return BNX2X_LOOPBACK_FAILED;
1903
1904 bnx2x_netif_stop(bp, 1);
1905 bnx2x_acquire_phy_lock(bp);
1906
1907 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
1908 if (res) {
1909 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
1910 rc |= BNX2X_PHY_LOOPBACK_FAILED;
1911 }
1912
1913 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
1914 if (res) {
1915 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
1916 rc |= BNX2X_MAC_LOOPBACK_FAILED;
1917 }
1918
1919 bnx2x_release_phy_lock(bp);
1920 bnx2x_netif_start(bp);
1921
1922 return rc;
1923}
1924
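/* CRC-32 residue property: each NVRAM region below ends with a CRC chosen
 * so that running ether_crc_le() over the whole region always yields this
 * constant, letting a region be validated without parsing its layout.
 */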
1925#define CRC32_RESIDUAL 0xdebb20e3
1926
1927static int bnx2x_test_nvram(struct bnx2x *bp)
1928{
1929 static const struct {
1930 int offset;
1931 int size;
1932 } nvram_tbl[] = {
1933 { 0, 0x14 }, /* bootstrap */
1934 { 0x14, 0xec }, /* dir */
1935 { 0x100, 0x350 }, /* manuf_info */
1936 { 0x450, 0xf0 }, /* feature_info */
1937 { 0x640, 0x64 }, /* upgrade_key_info */
1938 { 0x708, 0x70 }, /* manuf_key_info */
1939 { 0, 0 }
1940 };
1941 __be32 buf[0x350 / 4];
1942 u8 *data = (u8 *)buf;
1943 int i, rc;
1944 u32 magic, crc;
1945
1946 if (BP_NOMCP(bp))
1947 return 0;
1948
1949 rc = bnx2x_nvram_read(bp, 0, data, 4);
1950 if (rc) {
1951		DP(NETIF_MSG_PROBE, "magic value read failed (rc %d)\n", rc);
1952 goto test_nvram_exit;
1953 }
1954
1955 magic = be32_to_cpu(buf[0]);
1956 if (magic != 0x669955aa) {
1957 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
1958 rc = -ENODEV;
1959 goto test_nvram_exit;
1960 }
1961
1962 for (i = 0; nvram_tbl[i].size; i++) {
1963
1964 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
1965 nvram_tbl[i].size);
1966 if (rc) {
1967 DP(NETIF_MSG_PROBE,
1968 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
1969 goto test_nvram_exit;
1970 }
1971
1972 crc = ether_crc_le(nvram_tbl[i].size, data);
1973 if (crc != CRC32_RESIDUAL) {
1974 DP(NETIF_MSG_PROBE,
1975 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
1976 rc = -ENODEV;
1977 goto test_nvram_exit;
1978 }
1979 }
1980
1981test_nvram_exit:
1982 return rc;
1983}
1984
1985/* Send an EMPTY ramrod on the first queue */
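/* Posting a queue EMPTY command exercises the whole slow-path interrupt
 * path: the ramrod is posted on the slow-path queue and its completion
 * comes back through the event queue; RAMROD_COMP_WAIT below makes the
 * call block until that completion is observed.
 */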
1986static int bnx2x_test_intr(struct bnx2x *bp)
1987{
1988 struct bnx2x_queue_state_params params = {0};
1989
1990 if (!netif_running(bp->dev))
1991 return -ENODEV;
1992
1993 params.q_obj = &bp->fp->q_obj;
1994 params.cmd = BNX2X_Q_CMD_EMPTY;
1995
1996 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1997
1998 return bnx2x_queue_state_change(bp, &params);
1999}
2000
2001static void bnx2x_self_test(struct net_device *dev,
2002 struct ethtool_test *etest, u64 *buf)
2003{
2004 struct bnx2x *bp = netdev_priv(dev);
2005 u8 is_serdes;
2006 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2007 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2008 etest->flags |= ETH_TEST_FL_FAILED;
2009 return;
2010 }
2011
2012 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
2013
2014 if (!netif_running(dev))
2015 return;
2016
2017 /* offline tests are not supported in MF mode */
2018 if (IS_MF(bp))
2019 etest->flags &= ~ETH_TEST_FL_OFFLINE;
2020 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
2021
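	/* Offline tests below tear the NIC down and bring it back up in
	 * diagnostic mode (LOAD_DIAG), run the register/memory/loopback
	 * tests, then reload normally and wait for the original link
	 * state to be restored.
	 */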
2022 if (etest->flags & ETH_TEST_FL_OFFLINE) {
2023 int port = BP_PORT(bp);
2024 u32 val;
2025 u8 link_up;
2026
2027 /* save current value of input enable for TX port IF */
2028 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
2029 /* disable input for TX port IF */
2030 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
2031
2032 link_up = bp->link_vars.link_up;
2033
2034 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2035 bnx2x_nic_load(bp, LOAD_DIAG);
2036 /* wait until link state is restored */
2037 bnx2x_wait_for_link(bp, 1, is_serdes);
2038
2039 if (bnx2x_test_registers(bp) != 0) {
2040 buf[0] = 1;
2041 etest->flags |= ETH_TEST_FL_FAILED;
2042 }
2043 if (bnx2x_test_memory(bp) != 0) {
2044 buf[1] = 1;
2045 etest->flags |= ETH_TEST_FL_FAILED;
2046 }
2047
2048 buf[2] = bnx2x_test_loopback(bp);
2049 if (buf[2] != 0)
2050 etest->flags |= ETH_TEST_FL_FAILED;
2051
2052 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2053
2054 /* restore input for TX port IF */
2055 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
2056
2057 bnx2x_nic_load(bp, LOAD_NORMAL);
2058 /* wait until link state is restored */
2059 bnx2x_wait_for_link(bp, link_up, is_serdes);
2060 }
2061 if (bnx2x_test_nvram(bp) != 0) {
2062 buf[3] = 1;
2063 etest->flags |= ETH_TEST_FL_FAILED;
2064 }
2065 if (bnx2x_test_intr(bp) != 0) {
2066 buf[4] = 1;
2067 etest->flags |= ETH_TEST_FL_FAILED;
2068 }
2069
2070 if (bnx2x_link_test(bp, is_serdes) != 0) {
2071 buf[5] = 1;
2072 etest->flags |= ETH_TEST_FL_FAILED;
2073 }
2074
2075#ifdef BNX2X_EXTRA_DEBUG
2076 bnx2x_panic_dump(bp);
2077#endif
2078}
2079
2080#define IS_PORT_STAT(i) \
2081 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
2082#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
2083#define IS_MF_MODE_STAT(bp) \
2084 (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
2085
2086/* Ethtool statistics are displayed for all regular Ethernet queues and,
2087 * if not disabled, for the FCoE L2 queue.
2088 */
2089static inline int bnx2x_num_stat_queues(struct bnx2x *bp)
2090{
2091 return BNX2X_NUM_ETH_QUEUES(bp);
2092}
2093
2094static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
2095{
2096 struct bnx2x *bp = netdev_priv(dev);
2097 int i, num_stats;
2098
2099 switch (stringset) {
2100 case ETH_SS_STATS:
2101 if (is_multi(bp)) {
2102 num_stats = bnx2x_num_stat_queues(bp) *
2103 BNX2X_NUM_Q_STATS;
2104 if (!IS_MF_MODE_STAT(bp))
2105 num_stats += BNX2X_NUM_STATS;
2106 } else {
2107 if (IS_MF_MODE_STAT(bp)) {
2108 num_stats = 0;
2109 for (i = 0; i < BNX2X_NUM_STATS; i++)
2110 if (IS_FUNC_STAT(i))
2111 num_stats++;
2112 } else
2113 num_stats = BNX2X_NUM_STATS;
2114 }
2115 return num_stats;
2116
2117 case ETH_SS_TEST:
2118 return BNX2X_NUM_TESTS;
2119
2120 default:
2121 return -EINVAL;
2122 }
2123}
2124
2125static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
2126{
2127 struct bnx2x *bp = netdev_priv(dev);
2128 int i, j, k;
2129 char queue_name[MAX_QUEUE_NAME_LEN+1];
2130
2131 switch (stringset) {
2132 case ETH_SS_STATS:
2133 if (is_multi(bp)) {
2134 k = 0;
2135 for_each_eth_queue(bp, i) {
2136 memset(queue_name, 0, sizeof(queue_name));
2137 sprintf(queue_name, "%d", i);
2138 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
2139 snprintf(buf + (k + j)*ETH_GSTRING_LEN,
2140 ETH_GSTRING_LEN,
2141 bnx2x_q_stats_arr[j].string,
2142 queue_name);
2143 k += BNX2X_NUM_Q_STATS;
2144 }
2145 if (IS_MF_MODE_STAT(bp))
2146 break;
2147 for (j = 0; j < BNX2X_NUM_STATS; j++)
2148 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
2149 bnx2x_stats_arr[j].string);
2150 } else {
2151 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
2152 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
2153 continue;
2154 strcpy(buf + j*ETH_GSTRING_LEN,
2155 bnx2x_stats_arr[i].string);
2156 j++;
2157 }
2158 }
2159 break;
2160
2161 case ETH_SS_TEST:
2162 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
2163 break;
2164 }
2165}
2166
2167static void bnx2x_get_ethtool_stats(struct net_device *dev,
2168 struct ethtool_stats *stats, u64 *buf)
2169{
2170 struct bnx2x *bp = netdev_priv(dev);
2171 u32 *hw_stats, *offset;
2172 int i, j, k;
2173
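	/* Stats are read out of the u32-based hw_stats layout: size 0
	 * means the counter is not maintained for this chip, size 4 is a
	 * single u32, and size 8 is two consecutive u32s combined via
	 * HILO_U64 (the first u32 holding the high word).
	 */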
2174 if (is_multi(bp)) {
2175 k = 0;
2176 for_each_eth_queue(bp, i) {
2177 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
2178 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
2179 if (bnx2x_q_stats_arr[j].size == 0) {
2180 /* skip this counter */
2181 buf[k + j] = 0;
2182 continue;
2183 }
2184 offset = (hw_stats +
2185 bnx2x_q_stats_arr[j].offset);
2186 if (bnx2x_q_stats_arr[j].size == 4) {
2187 /* 4-byte counter */
2188 buf[k + j] = (u64) *offset;
2189 continue;
2190 }
2191 /* 8-byte counter */
2192 buf[k + j] = HILO_U64(*offset, *(offset + 1));
2193 }
2194 k += BNX2X_NUM_Q_STATS;
2195 }
2196 if (IS_MF_MODE_STAT(bp))
2197 return;
2198 hw_stats = (u32 *)&bp->eth_stats;
2199 for (j = 0; j < BNX2X_NUM_STATS; j++) {
2200 if (bnx2x_stats_arr[j].size == 0) {
2201 /* skip this counter */
2202 buf[k + j] = 0;
2203 continue;
2204 }
2205 offset = (hw_stats + bnx2x_stats_arr[j].offset);
2206 if (bnx2x_stats_arr[j].size == 4) {
2207 /* 4-byte counter */
2208 buf[k + j] = (u64) *offset;
2209 continue;
2210 }
2211 /* 8-byte counter */
2212 buf[k + j] = HILO_U64(*offset, *(offset + 1));
2213 }
2214 } else {
2215 hw_stats = (u32 *)&bp->eth_stats;
2216 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
2217 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
2218 continue;
2219 if (bnx2x_stats_arr[i].size == 0) {
2220 /* skip this counter */
2221 buf[j] = 0;
2222 j++;
2223 continue;
2224 }
2225 offset = (hw_stats + bnx2x_stats_arr[i].offset);
2226 if (bnx2x_stats_arr[i].size == 4) {
2227 /* 4-byte counter */
2228 buf[j] = (u64) *offset;
2229 j++;
2230 continue;
2231 }
2232 /* 8-byte counter */
2233 buf[j] = HILO_U64(*offset, *(offset + 1));
2234 j++;
2235 }
2236 }
2237}
2238
2239static int bnx2x_set_phys_id(struct net_device *dev,
2240 enum ethtool_phys_id_state state)
2241{
2242 struct bnx2x *bp = netdev_priv(dev);
2243
2244 if (!netif_running(dev))
2245 return -EAGAIN;
2246
2247 if (!bp->port.pmf)
2248 return -EOPNOTSUPP;
2249
2250 switch (state) {
2251 case ETHTOOL_ID_ACTIVE:
2252 return 1; /* cycle on/off once per second */
2253
2254 case ETHTOOL_ID_ON:
2255 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2256 LED_MODE_ON, SPEED_1000);
2257 break;
2258
2259 case ETHTOOL_ID_OFF:
2260 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2261 LED_MODE_FRONT_PANEL_OFF, 0);
2262
2263 break;
2264
2265 case ETHTOOL_ID_INACTIVE:
2266 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2267 LED_MODE_OPER,
2268 bp->link_vars.line_speed);
2269 }
2270
2271 return 0;
2272}
2273
2274static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2275 void *rules __always_unused)
2276{
2277 struct bnx2x *bp = netdev_priv(dev);
2278
2279 switch (info->cmd) {
2280 case ETHTOOL_GRXRINGS:
2281 info->data = BNX2X_NUM_ETH_QUEUES(bp);
2282 return 0;
2283
2284 default:
2285 return -EOPNOTSUPP;
2286 }
2287}
2288
2289static int bnx2x_get_rxfh_indir(struct net_device *dev,
2290 struct ethtool_rxfh_indir *indir)
2291{
2292 struct bnx2x *bp = netdev_priv(dev);
2293 size_t copy_size =
2294 min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
2295 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
2296 size_t i;
2297
2298 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
2299 return -EOPNOTSUPP;
2300
2301 /* Get the current configuration of the RSS indirection table */
2302 bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
2303
2304 /*
2305	 * We can't use a memcpy() because the internal storage of the
2306	 * indirection table is a u8 array while indir->ring_index
2307	 * points to an array of u32.
2308 *
2309 * Indirection table contains the FW Client IDs, so we need to
2310 * align the returned table to the Client ID of the leading RSS
2311 * queue.
2312 */
2313 for (i = 0; i < copy_size; i++)
2314 indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
2315
2316 indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
2317
2318 return 0;
2319}
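
/* Worked example (hypothetical values): if the leading RSS queue has
 * cl_id 17 and a FW table entry is 19, the index reported to ethtool is
 * 19 - 17 = 2; bnx2x_set_rxfh_indir() below applies the inverse
 * translation before the table is programmed.
 */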
2320
2321static int bnx2x_set_rxfh_indir(struct net_device *dev,
2322 const struct ethtool_rxfh_indir *indir)
2323{
2324 struct bnx2x *bp = netdev_priv(dev);
2325 size_t i;
2326 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
2327 u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2328
2329 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
2330 return -EOPNOTSUPP;
2331
2332 /* validate the size */
2333 if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
2334 return -EINVAL;
2335
2336 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
2337 /* validate the indices */
2338 if (indir->ring_index[i] >= num_eth_queues)
2339 return -EINVAL;
2340 /*
2341		 * The same as in bnx2x_get_rxfh_indir(): we can't use a memcpy()
2342		 * because the internal storage of the indirection table is a u8
2343		 * array while indir->ring_index points to an array of u32.
2344 *
2345 * Indirection table contains the FW Client IDs, so we need to
2346 * align the received table to the Client ID of the leading RSS
2347 * queue
2348 */
2349 ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
2350 }
2351
2352 return bnx2x_config_rss_pf(bp, ind_table, false);
2353}
2354
2355static const struct ethtool_ops bnx2x_ethtool_ops = {
2356 .get_settings = bnx2x_get_settings,
2357 .set_settings = bnx2x_set_settings,
2358 .get_drvinfo = bnx2x_get_drvinfo,
2359 .get_regs_len = bnx2x_get_regs_len,
2360 .get_regs = bnx2x_get_regs,
2361 .get_wol = bnx2x_get_wol,
2362 .set_wol = bnx2x_set_wol,
2363 .get_msglevel = bnx2x_get_msglevel,
2364 .set_msglevel = bnx2x_set_msglevel,
2365 .nway_reset = bnx2x_nway_reset,
2366 .get_link = bnx2x_get_link,
2367 .get_eeprom_len = bnx2x_get_eeprom_len,
2368 .get_eeprom = bnx2x_get_eeprom,
2369 .set_eeprom = bnx2x_set_eeprom,
2370 .get_coalesce = bnx2x_get_coalesce,
2371 .set_coalesce = bnx2x_set_coalesce,
2372 .get_ringparam = bnx2x_get_ringparam,
2373 .set_ringparam = bnx2x_set_ringparam,
2374 .get_pauseparam = bnx2x_get_pauseparam,
2375 .set_pauseparam = bnx2x_set_pauseparam,
2376 .self_test = bnx2x_self_test,
2377 .get_sset_count = bnx2x_get_sset_count,
2378 .get_strings = bnx2x_get_strings,
2379 .set_phys_id = bnx2x_set_phys_id,
2380 .get_ethtool_stats = bnx2x_get_ethtool_stats,
2381 .get_rxnfc = bnx2x_get_rxnfc,
2382 .get_rxfh_indir = bnx2x_get_rxfh_indir,
2383 .set_rxfh_indir = bnx2x_set_rxfh_indir,
2384};
2385
2386void bnx2x_set_ethtool_ops(struct net_device *netdev)
2387{
2388 SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
2389}
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
new file mode 100644
index 00000000000..998652a1b85
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_fw_defs.h
@@ -0,0 +1,410 @@
1/* bnx2x_fw_defs.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9
10#ifndef BNX2X_FW_DEFS_H
11#define BNX2X_FW_DEFS_H
12
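/* Each *_OFFSET macro below resolves a storm-RAM address as
 * base + index * stride (the m1/m2 fields). The IRO table itself is
 * not fixed at build time: it is loaded from the iro_arr section of
 * the firmware file (see bnx2x_fw_file_hdr.h), so one driver binary
 * can work with different microcode layouts.
 */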
13#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base)
14#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
15 (IRO[147].base + ((assertListEntry) * IRO[147].m1))
16#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
17 (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
18 IRO[153].m2))
19#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
20 (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
21 IRO[154].m2))
22#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
23 (IRO[159].base + ((funcId) * IRO[159].m1))
24#define CSTORM_FUNC_EN_OFFSET(funcId) \
25 (IRO[149].base + ((funcId) * IRO[149].m1))
26#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
27#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
28 (IRO[315].base + ((pfId) * IRO[315].m1))
29#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
30 (IRO[316].base + ((pfId) * IRO[316].m1))
31#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
32 (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
33#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
34 (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
35#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
36 (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
37#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
38 (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
39#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
40 (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2))
41#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
42 (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
43#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
44 (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
45#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
46 (IRO[314].base + ((pfId) * IRO[314].m1))
47#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
48 (IRO[306].base + ((pfId) * IRO[306].m1))
49#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
50 (IRO[305].base + ((pfId) * IRO[305].m1))
51#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
52 (IRO[304].base + ((pfId) * IRO[304].m1))
53#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
54 (IRO[151].base + ((funcId) * IRO[151].m1))
55#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
56 (IRO[142].base + ((pfId) * IRO[142].m1))
57#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
58 (IRO[143].base + ((pfId) * IRO[143].m1))
59#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
60 (IRO[141].base + ((pfId) * IRO[141].m1))
61#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
62#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
63 (IRO[144].base + ((pfId) * IRO[144].m1))
64#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
65#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
66 (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
67#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
68 (IRO[133].base + ((sbId) * IRO[133].m1))
69#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
70 (IRO[134].base + ((sbId) * IRO[134].m1))
71#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
72 (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
73#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
74 (IRO[132].base + ((sbId) * IRO[132].m1))
75#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
76#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
77 (IRO[137].base + ((sbId) * IRO[137].m1))
78#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
79#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
80 (IRO[155].base + ((vfId) * IRO[155].m1))
81#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
82 (IRO[156].base + ((vfId) * IRO[156].m1))
83#define CSTORM_VF_TO_PF_OFFSET(funcId) \
84 (IRO[150].base + ((funcId) * IRO[150].m1))
85#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
86#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
87 (IRO[203].base + ((pfId) * IRO[203].m1))
88#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
89#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
90 (IRO[101].base + ((assertListEntry) * IRO[101].m1))
91#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
92#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
93 (IRO[108].base)
94#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
95 (IRO[201].base + ((pfId) * IRO[201].m1))
96#define TSTORM_FUNC_EN_OFFSET(funcId) \
97 (IRO[103].base + ((funcId) * IRO[103].m1))
98#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
99 (IRO[271].base + ((pfId) * IRO[271].m1))
100#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
101 (IRO[272].base + ((pfId) * IRO[272].m1))
102#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
103 (IRO[273].base + ((pfId) * IRO[273].m1))
104#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
105 (IRO[274].base + ((pfId) * IRO[274].m1))
106#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
107 (IRO[270].base + ((pfId) * IRO[270].m1))
108#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
109 (IRO[269].base + ((pfId) * IRO[269].m1))
110#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
111 (IRO[268].base + ((pfId) * IRO[268].m1))
112#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
113 (IRO[267].base + ((pfId) * IRO[267].m1))
114#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
115 (IRO[276].base + ((pfId) * IRO[276].m1))
116#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
117 (IRO[263].base + ((pfId) * IRO[263].m1))
118#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
119 (IRO[264].base + ((pfId) * IRO[264].m1))
120#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
121 (IRO[265].base + ((pfId) * IRO[265].m1))
122#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
123 (IRO[266].base + ((pfId) * IRO[266].m1))
124#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
125 (IRO[202].base + ((pfId) * IRO[202].m1))
126#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
127 (IRO[105].base + ((funcId) * IRO[105].m1))
128#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
129 (IRO[216].base + ((pfId) * IRO[216].m1))
130#define TSTORM_VF_TO_PF_OFFSET(funcId) \
131 (IRO[104].base + ((funcId) * IRO[104].m1))
132#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
133#define USTORM_AGG_DATA_SIZE (IRO[206].size)
134#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
135#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
136 (IRO[176].base + ((assertListEntry) * IRO[176].m1))
137#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
138 (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
139 IRO[205].m2))
140#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
141 (IRO[183].base + ((portId) * IRO[183].m1))
142#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
143 (IRO[317].base + ((pfId) * IRO[317].m1))
144#define USTORM_FUNC_EN_OFFSET(funcId) \
145 (IRO[178].base + ((funcId) * IRO[178].m1))
146#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
147 (IRO[281].base + ((pfId) * IRO[281].m1))
148#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
149 (IRO[282].base + ((pfId) * IRO[282].m1))
150#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
151 (IRO[286].base + ((pfId) * IRO[286].m1))
152#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
153 (IRO[283].base + ((pfId) * IRO[283].m1))
154#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
155 (IRO[279].base + ((pfId) * IRO[279].m1))
156#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
157 (IRO[278].base + ((pfId) * IRO[278].m1))
158#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
159 (IRO[277].base + ((pfId) * IRO[277].m1))
160#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
161 (IRO[280].base + ((pfId) * IRO[280].m1))
162#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
163 (IRO[284].base + ((pfId) * IRO[284].m1))
164#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
165 (IRO[285].base + ((pfId) * IRO[285].m1))
166#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
167 (IRO[182].base + ((pfId) * IRO[182].m1))
168#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
169 (IRO[180].base + ((funcId) * IRO[180].m1))
170#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
171 (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
172 IRO[209].m2))
173#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
174 (IRO[210].base + ((qzoneId) * IRO[210].m1))
175#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
176#define USTORM_TPA_BTR_SIZE (IRO[207].size)
177#define USTORM_VF_TO_PF_OFFSET(funcId) \
178 (IRO[179].base + ((funcId) * IRO[179].m1))
179#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
180#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
181#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base)
182#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
183 (IRO[50].base + ((assertListEntry) * IRO[50].m1))
184#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
185 (IRO[43].base + ((portId) * IRO[43].m1))
186#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
187 (IRO[45].base + ((pfId) * IRO[45].m1))
188#define XSTORM_FUNC_EN_OFFSET(funcId) \
189 (IRO[47].base + ((funcId) * IRO[47].m1))
190#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
191 (IRO[294].base + ((pfId) * IRO[294].m1))
192#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
193 (IRO[297].base + ((pfId) * IRO[297].m1))
194#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
195 (IRO[298].base + ((pfId) * IRO[298].m1))
196#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
197 (IRO[299].base + ((pfId) * IRO[299].m1))
198#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
199 (IRO[300].base + ((pfId) * IRO[300].m1))
200#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
201 (IRO[301].base + ((pfId) * IRO[301].m1))
202#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
203 (IRO[302].base + ((pfId) * IRO[302].m1))
204#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
205 (IRO[303].base + ((pfId) * IRO[303].m1))
206#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
207 (IRO[293].base + ((pfId) * IRO[293].m1))
208#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
209 (IRO[292].base + ((pfId) * IRO[292].m1))
210#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
211 (IRO[291].base + ((pfId) * IRO[291].m1))
212#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
213 (IRO[296].base + ((pfId) * IRO[296].m1))
214#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
215 (IRO[295].base + ((pfId) * IRO[295].m1))
216#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
217 (IRO[290].base + ((pfId) * IRO[290].m1))
218#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
219 (IRO[289].base + ((pfId) * IRO[289].m1))
220#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
221 (IRO[288].base + ((pfId) * IRO[288].m1))
222#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
223 (IRO[287].base + ((pfId) * IRO[287].m1))
224#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
225 (IRO[44].base + ((pfId) * IRO[44].m1))
226#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
227 (IRO[49].base + ((funcId) * IRO[49].m1))
228#define XSTORM_SPQ_DATA_OFFSET(funcId) \
229 (IRO[32].base + ((funcId) * IRO[32].m1))
230#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
231#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
232 (IRO[30].base + ((funcId) * IRO[30].m1))
233#define XSTORM_SPQ_PROD_OFFSET(funcId) \
234 (IRO[31].base + ((funcId) * IRO[31].m1))
235#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
236 (IRO[211].base + ((portId) * IRO[211].m1))
237#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
238 (IRO[212].base + ((portId) * IRO[212].m1))
239#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
240 (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
241 IRO[214].m2))
242#define XSTORM_VF_TO_PF_OFFSET(funcId) \
243 (IRO[48].base + ((funcId) * IRO[48].m1))
244#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
245
246/**
247 * This file defines HSI constants for the ETH flow
248 */
249#ifdef _EVEREST_MICROCODE
250#include "Microcode\Generated\DataTypes\eth_rx_bd.h"
251#include "Microcode\Generated\DataTypes\eth_tx_bd.h"
252#include "Microcode\Generated\DataTypes\eth_rx_cqe.h"
253#include "Microcode\Generated\DataTypes\eth_rx_sge.h"
254#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h"
255#endif
256
257
258/* Ethernet Ring parameters */
259#define X_ETH_LOCAL_RING_SIZE 13
260#define FIRST_BD_IN_PKT 0
261#define PARSE_BD_INDEX 1
262#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
263#define U_ETH_NUM_OF_SGES_TO_FETCH 8
264#define U_ETH_MAX_SGES_FOR_PACKET 3
265
266/* Rx ring params */
267#define U_ETH_LOCAL_BD_RING_SIZE 8
268#define U_ETH_LOCAL_SGE_RING_SIZE 10
269#define U_ETH_SGL_SIZE 8
270	/* The FW will pad the buffer with this value, so the IP header \
271	   will be aligned to 4 bytes */
272#define IP_HEADER_ALIGNMENT_PADDING 2
273
274#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
275 (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
276
277#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
278#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
279#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
280
281#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
282#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
283#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
284
285#define U_ETH_UNDEFINED_Q 0xFF
286
287#define T_ETH_INDIRECTION_TABLE_SIZE 128
288#define T_ETH_RSS_KEY 10
289#define ETH_NUM_OF_RSS_ENGINES_E2 72
290
291#define FILTER_RULES_COUNT 16
292#define MULTICAST_RULES_COUNT 16
293#define CLASSIFY_RULES_COUNT 16
294
295/* The CRC32 seed used for the multicast address hash (reduction) */
296#define ETH_CRC32_HASH_SEED 0x00000000
297
298#define ETH_CRC32_HASH_BIT_SIZE (8)
299#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
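
/* The approximate-match multicast filter hashes each address with CRC32
 * (seed above), keeps ETH_CRC32_HASH_BIT_SIZE bits of the result and
 * uses them to select one of the ETH_NUM_OF_MCAST_BINS (2^8 = 256)
 * bins defined below.
 */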
300
301/* Maximal L2 clients supported */
302#define ETH_MAX_RX_CLIENTS_E1 18
303#define ETH_MAX_RX_CLIENTS_E1H 28
304#define ETH_MAX_RX_CLIENTS_E2 152
305
306/* Maximal statistics client Ids */
307#define MAX_STAT_COUNTER_ID_E1 36
308#define MAX_STAT_COUNTER_ID_E1H 56
309#define MAX_STAT_COUNTER_ID_E2 140
310
311#define MAX_MAC_CREDIT_E1 192 /* Per Chip */
312#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
313#define MAX_MAC_CREDIT_E2 272 /* Per Path */
314#define MAX_VLAN_CREDIT_E1 0 /* Per Chip */
315#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
316#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
317
318
319/* Maximal aggregation queues supported */
320#define ETH_MAX_AGGREGATION_QUEUES_E1 32
321#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
322
323
324#define ETH_NUM_OF_MCAST_BINS 256
325#define ETH_NUM_OF_MCAST_ENGINES_E2 72
326
327#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
328#define ETH_MIN_RX_CQES_WITH_TPA_E1 \
329 (ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA)
330#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
331 (ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
332
333#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
334
335
336/**
337 * This file defines HSI constants common to all microcode flows
338 */
339
340#define PROTOCOL_STATE_BIT_OFFSET 6
341
342#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
343#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
344#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
345
346/* microcode fixed page size of 4K (chains and ring segments) */
347#define MC_PAGE_SIZE 4096
348
349/* Number of indices per slow-path SB */
350#define HC_SP_SB_MAX_INDICES 16
351
352/* Number of indices per SB */
353#define HC_SB_MAX_INDICES_E1X 8
354#define HC_SB_MAX_INDICES_E2 8
355
356#define HC_SB_MAX_SB_E1X 32
357#define HC_SB_MAX_SB_E2 136
358
359#define HC_SP_SB_ID 0xde
360
361#define HC_SB_MAX_SM 2
362
363#define HC_SB_MAX_DYNAMIC_INDICES 4
364
365/* max number of slow path commands per port */
366#define MAX_RAMRODS_PER_PORT 8
367
368
369/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
370
371#define TIMERS_TICK_SIZE_CHIP (1e-3)
372
373#define TSEMI_CLK1_RESUL_CHIP (1e-3)
374
375#define XSEMI_CLK1_RESUL_CHIP (1e-3)
376
377#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
378
379/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
380
381#define XSTORM_IP_ID_ROLL_HALF 0x8000
382#define XSTORM_IP_ID_ROLL_ALL 0
383
384#define FW_LOG_LIST_SIZE 50
385
386#define NUM_OF_SAFC_BITS 16
387#define MAX_COS_NUMBER 4
388#define MAX_TRAFFIC_TYPES 8
389#define MAX_PFC_PRIORITIES 8
390
391	/* used by the traffic_type_to_priority[] array to mark a traffic \
392	   type that is not mapped to any priority */
393#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
394
395
396#define C_ERES_PER_PAGE \
397 (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
398#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
399
400#define STATS_QUERY_CMD_COUNT 16
401
402#define NIV_LIST_TABLE_SIZE 4096
403
404#define INVALID_VNIC_ID 0xFF
405
406
407#define UNDEF_IRO 0x80000000
408
409
410#endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
new file mode 100644
index 00000000000..f4a07fbaed0
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
@@ -0,0 +1,38 @@
1/* bnx2x_fw_file_hdr.h: FW binary file header structure.
2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Vladislav Zolotarov <vladz@broadcom.com>
11 * Based on the original idea of John Wright <john.wright@hp.com>.
12 */
13
14#ifndef BNX2X_INIT_FILE_HDR_H
15#define BNX2X_INIT_FILE_HDR_H
16
17struct bnx2x_fw_file_section {
18 __be32 len;
19 __be32 offset;
20};
21
22struct bnx2x_fw_file_hdr {
23 struct bnx2x_fw_file_section init_ops;
24 struct bnx2x_fw_file_section init_ops_offsets;
25 struct bnx2x_fw_file_section init_data;
26 struct bnx2x_fw_file_section tsem_int_table_data;
27 struct bnx2x_fw_file_section tsem_pram_data;
28 struct bnx2x_fw_file_section usem_int_table_data;
29 struct bnx2x_fw_file_section usem_pram_data;
30 struct bnx2x_fw_file_section csem_int_table_data;
31 struct bnx2x_fw_file_section csem_pram_data;
32 struct bnx2x_fw_file_section xsem_int_table_data;
33 struct bnx2x_fw_file_section xsem_pram_data;
34 struct bnx2x_fw_file_section iro_arr;
35 struct bnx2x_fw_file_section fw_version;
36};
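
/* A minimal sketch, assuming the usual request_firmware() flow and the
 * big-endian fields declared above, of how one section would be located
 * inside the loaded blob ('fw' here is a hypothetical pointer to a
 * struct firmware):
 *
 *	const struct bnx2x_fw_file_hdr *hdr =
 *		(const struct bnx2x_fw_file_hdr *)fw->data;
 *	u32 off = be32_to_cpu(hdr->init_data.offset);
 *	u32 len = be32_to_cpu(hdr->init_data.len);
 *	const u8 *init_data = fw->data + off;
 *
 * Callers should bounds-check off + len against fw->size before using
 * the section.
 */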
37
38#endif /* BNX2X_INIT_FILE_HDR_H */
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
new file mode 100644
index 00000000000..dc24de40e33
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -0,0 +1,5131 @@
1/* bnx2x_hsi.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9#ifndef BNX2X_HSI_H
10#define BNX2X_HSI_H
11
12#include "bnx2x_fw_defs.h"
13
14#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
15
16struct license_key {
17 u32 reserved[6];
18
19 u32 max_iscsi_conn;
20#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
21#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0
22#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
23#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16
24
25 u32 reserved_a;
26
27 u32 max_fcoe_conn;
28#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF
29#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0
30#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000
31#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16
32
33 u32 reserved_b[4];
34};
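
/* The fields above follow the shared MASK/SHIFT convention; e.g. the
 * target-connection limit would be extracted as:
 *
 *	u16 trgt = (key->max_iscsi_conn & BNX2X_MAX_ISCSI_TRGT_CONN_MASK)
 *		   >> BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT;
 *
 * ('key' being a hypothetical struct license_key pointer.)
 */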
35
36
37#define PORT_0 0
38#define PORT_1 1
39#define PORT_MAX 2
40
41/****************************************************************************
42 * Shared HW configuration *
43 ****************************************************************************/
44#define PIN_CFG_NA 0x00000000
45#define PIN_CFG_GPIO0_P0 0x00000001
46#define PIN_CFG_GPIO1_P0 0x00000002
47#define PIN_CFG_GPIO2_P0 0x00000003
48#define PIN_CFG_GPIO3_P0 0x00000004
49#define PIN_CFG_GPIO0_P1 0x00000005
50#define PIN_CFG_GPIO1_P1 0x00000006
51#define PIN_CFG_GPIO2_P1 0x00000007
52#define PIN_CFG_GPIO3_P1 0x00000008
53#define PIN_CFG_EPIO0 0x00000009
54#define PIN_CFG_EPIO1 0x0000000a
55#define PIN_CFG_EPIO2 0x0000000b
56#define PIN_CFG_EPIO3 0x0000000c
57#define PIN_CFG_EPIO4 0x0000000d
58#define PIN_CFG_EPIO5 0x0000000e
59#define PIN_CFG_EPIO6 0x0000000f
60#define PIN_CFG_EPIO7 0x00000010
61#define PIN_CFG_EPIO8 0x00000011
62#define PIN_CFG_EPIO9 0x00000012
63#define PIN_CFG_EPIO10 0x00000013
64#define PIN_CFG_EPIO11 0x00000014
65#define PIN_CFG_EPIO12 0x00000015
66#define PIN_CFG_EPIO13 0x00000016
67#define PIN_CFG_EPIO14 0x00000017
68#define PIN_CFG_EPIO15 0x00000018
69#define PIN_CFG_EPIO16 0x00000019
70#define PIN_CFG_EPIO17 0x0000001a
71#define PIN_CFG_EPIO18 0x0000001b
72#define PIN_CFG_EPIO19 0x0000001c
73#define PIN_CFG_EPIO20 0x0000001d
74#define PIN_CFG_EPIO21 0x0000001e
75#define PIN_CFG_EPIO22 0x0000001f
76#define PIN_CFG_EPIO23 0x00000020
77#define PIN_CFG_EPIO24 0x00000021
78#define PIN_CFG_EPIO25 0x00000022
79#define PIN_CFG_EPIO26 0x00000023
80#define PIN_CFG_EPIO27 0x00000024
81#define PIN_CFG_EPIO28 0x00000025
82#define PIN_CFG_EPIO29 0x00000026
83#define PIN_CFG_EPIO30 0x00000027
84#define PIN_CFG_EPIO31 0x00000028
85
86/* EPIO definition */
87#define EPIO_CFG_NA 0x00000000
88#define EPIO_CFG_EPIO0 0x00000001
89#define EPIO_CFG_EPIO1 0x00000002
90#define EPIO_CFG_EPIO2 0x00000003
91#define EPIO_CFG_EPIO3 0x00000004
92#define EPIO_CFG_EPIO4 0x00000005
93#define EPIO_CFG_EPIO5 0x00000006
94#define EPIO_CFG_EPIO6 0x00000007
95#define EPIO_CFG_EPIO7 0x00000008
96#define EPIO_CFG_EPIO8 0x00000009
97#define EPIO_CFG_EPIO9 0x0000000a
98#define EPIO_CFG_EPIO10 0x0000000b
99#define EPIO_CFG_EPIO11 0x0000000c
100#define EPIO_CFG_EPIO12 0x0000000d
101#define EPIO_CFG_EPIO13 0x0000000e
102#define EPIO_CFG_EPIO14 0x0000000f
103#define EPIO_CFG_EPIO15 0x00000010
104#define EPIO_CFG_EPIO16 0x00000011
105#define EPIO_CFG_EPIO17 0x00000012
106#define EPIO_CFG_EPIO18 0x00000013
107#define EPIO_CFG_EPIO19 0x00000014
108#define EPIO_CFG_EPIO20 0x00000015
109#define EPIO_CFG_EPIO21 0x00000016
110#define EPIO_CFG_EPIO22 0x00000017
111#define EPIO_CFG_EPIO23 0x00000018
112#define EPIO_CFG_EPIO24 0x00000019
113#define EPIO_CFG_EPIO25 0x0000001a
114#define EPIO_CFG_EPIO26 0x0000001b
115#define EPIO_CFG_EPIO27 0x0000001c
116#define EPIO_CFG_EPIO28 0x0000001d
117#define EPIO_CFG_EPIO29 0x0000001e
118#define EPIO_CFG_EPIO30 0x0000001f
119#define EPIO_CFG_EPIO31 0x00000020
120
121
122struct shared_hw_cfg { /* NVRAM Offset */
123 /* Up to 16 bytes of NULL-terminated string */
124 u8 part_num[16]; /* 0x104 */
125
126 u32 config; /* 0x114 */
127 #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001
128 #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0
129 #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000
130 #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001
131 #define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002
132
133 #define SHARED_HW_CFG_PORT_SWAP 0x00000004
134
135 #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008
136
137 #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000
138 #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010
139
140 #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700
141 #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8
142	/* Whatever MFW is found in NVM
143	   (if multiple are found, the priority order is: NC-SI, UMP, IPMI) */
144 #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000
145 #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100
146 #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200
147 #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300
148 /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI
149	   (can only be used when an add-in board, not the BMC, pulls down SPIO4) */
150 #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400
151 /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI
152	   (can only be used when an add-in board, not the BMC, pulls down SPIO4) */
153 #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500
154 /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP
155	   (can only be used when an add-in board, not the BMC, pulls down SPIO4) */
156 #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600
157
158 #define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000
159 #define SHARED_HW_CFG_LED_MODE_SHIFT 16
160 #define SHARED_HW_CFG_LED_MAC1 0x00000000
161 #define SHARED_HW_CFG_LED_PHY1 0x00010000
162 #define SHARED_HW_CFG_LED_PHY2 0x00020000
163 #define SHARED_HW_CFG_LED_PHY3 0x00030000
164 #define SHARED_HW_CFG_LED_MAC2 0x00040000
165 #define SHARED_HW_CFG_LED_PHY4 0x00050000
166 #define SHARED_HW_CFG_LED_PHY5 0x00060000
167 #define SHARED_HW_CFG_LED_PHY6 0x00070000
168 #define SHARED_HW_CFG_LED_MAC3 0x00080000
169 #define SHARED_HW_CFG_LED_PHY7 0x00090000
170 #define SHARED_HW_CFG_LED_PHY9 0x000a0000
171 #define SHARED_HW_CFG_LED_PHY11 0x000b0000
172 #define SHARED_HW_CFG_LED_MAC4 0x000c0000
173 #define SHARED_HW_CFG_LED_PHY8 0x000d0000
174 #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
175
176
177 #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
178 #define SHARED_HW_CFG_AN_ENABLE_SHIFT 24
179 #define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000
180 #define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000
181 #define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000
182 #define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000
183 #define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000
184 #define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000
185
186 #define SHARED_HW_CFG_SRIOV_MASK 0x40000000
187 #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000
188 #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000
189
190 #define SHARED_HW_CFG_ATC_MASK 0x80000000
191 #define SHARED_HW_CFG_ATC_DISABLED 0x00000000
192 #define SHARED_HW_CFG_ATC_ENABLED 0x80000000
193
194 u32 config2; /* 0x118 */
195 /* one time auto detect grace period (in sec) */
196 #define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff
197 #define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0
198
199 #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100
200 #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000
201
202 /* The default value for the core clock is 250MHz and it is
203 achieved by setting the clock change to 4 */
204 #define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00
205 #define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9
206
207 #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000
208 #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000
209 #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000
210
211 #define SHARED_HW_CFG_HIDE_PORT1 0x00002000
212
213 #define SHARED_HW_CFG_WOL_CAPABLE_MASK 0x00004000
214 #define SHARED_HW_CFG_WOL_CAPABLE_DISABLED 0x00000000
215 #define SHARED_HW_CFG_WOL_CAPABLE_ENABLED 0x00004000
216
217 /* Output low when PERST is asserted */
218 #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000
219 #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 0x00000000
220 #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000
221
222 #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000
223 #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16
224 #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000
225 #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000
226 #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000
227 #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000
228
229 /* The fan failure mechanism is usually related to the PHY type
230 since the power consumption of the board is determined by the PHY.
231	   Currently, a fan is required for most designs with SFX7101, BCM8727
232 and BCM8481. If a fan is not required for a board which uses one
233 of those PHYs, this field should be set to "Disabled". If a fan is
234 required for a different PHY type, this option should be set to
235 "Enabled". The fan failure indication is expected on SPIO5 */
236 #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000
237 #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19
238 #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000
239 #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
240 #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
241
242 /* ASPM Power Management support */
243 #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000
244 #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21
245 #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000
246 #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000
247 #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000
248 #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000
249
250 /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register
251 tl_control_0 (register 0x2800) */
252 #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000
253 #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000
254 #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000
255
256 #define SHARED_HW_CFG_PORT_MODE_MASK 0x01000000
257 #define SHARED_HW_CFG_PORT_MODE_2 0x00000000
258 #define SHARED_HW_CFG_PORT_MODE_4 0x01000000
259
260 #define SHARED_HW_CFG_PATH_SWAP_MASK 0x02000000
261 #define SHARED_HW_CFG_PATH_SWAP_DISABLED 0x00000000
262 #define SHARED_HW_CFG_PATH_SWAP_ENABLED 0x02000000
263
264 /* Set the MDC/MDIO access for the first external phy */
265 #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000
266 #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26
267 #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000
268 #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000
269 #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000
270 #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000
271 #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000
272
273 /* Set the MDC/MDIO access for the second external phy */
274 #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000
275 #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29
276 #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000
277 #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000
278 #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000
279 #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
280 #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
281
282
283 u32 power_dissipated; /* 0x11c */
284 #define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000
285 #define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16
286 #define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000
287 #define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000
288 #define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000
289 #define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000
290
291 #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
292 #define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24
293
294 u32 ump_nc_si_config; /* 0x120 */
295 #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003
296 #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0
297 #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000
298 #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001
299 #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000
300 #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002
301
302 #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00
303 #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8
304
305 #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000
306 #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16
307 #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000
308 #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000
309
310 u32 board; /* 0x124 */
311 #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F
312 #define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 0
313 #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0
314 #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6
315 /* Use the PIN_CFG_XXX defines on top */
316 #define SHARED_HW_CFG_BOARD_REV_MASK 0x00ff0000
317 #define SHARED_HW_CFG_BOARD_REV_SHIFT 16
318
319 #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0f000000
320 #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24
321
322 #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xf0000000
323 #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28
324
325 u32 wc_lane_config; /* 0x128 */
326 #define SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF
327 #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0
328 #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b
329 #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4
330 #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b
331 #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4
332 #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF
333 #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
334 #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00
335 #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
336
337 /* TX lane Polarity swap */
338 #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000
339 #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000
340 #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000
341 #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000
342	/* RX lane Polarity swap */
343 #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000
344 #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000
345 #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000
346 #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000
347
348 /* Selects the port layout of the board */
349 #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000
350 #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24
351 #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000
352 #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000
353 #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000
354 #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000
355 #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000
356 #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000
357};
358
359
360/****************************************************************************
361 * Port HW configuration *
362 ****************************************************************************/
363struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
364
365 u32 pci_id;
366 #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000
367 #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff
368
369 u32 pci_sub_id;
370 #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000
371 #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff
372
373 u32 power_dissipated;
374 #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff
375 #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0
376 #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00
377 #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8
378 #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000
379 #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16
380 #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000
381 #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24
382
383 u32 power_consumed;
384 #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff
385 #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0
386 #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00
387 #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8
388 #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000
389 #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16
390 #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000
391 #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24
392
393 u32 mac_upper;
394 #define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff
395 #define PORT_HW_CFG_UPPERMAC_SHIFT 0
396 u32 mac_lower;
397
398 u32 iscsi_mac_upper; /* Upper 16 bits are always zeroes */
399 u32 iscsi_mac_lower;
400
401 u32 rdma_mac_upper; /* Upper 16 bits are always zeroes */
402 u32 rdma_mac_lower;
403
404 u32 serdes_config;
405 #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000ffff
406 #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0
407
408 #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xffff0000
409 #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
410
411
412 /* Default values: 2P-64, 4P-32 */
413 u32 pf_config; /* 0x158 */
414 #define PORT_HW_CFG_PF_NUM_VF_MASK 0x0000007F
415 #define PORT_HW_CFG_PF_NUM_VF_SHIFT 0
416
417 /* Default values: 17 */
418 #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_MASK 0x00007F00
419 #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_SHIFT 8
420
421 #define PORT_HW_CFG_ENABLE_FLR_MASK 0x00010000
422 #define PORT_HW_CFG_FLR_ENABLED 0x00010000
423
424 u32 vf_config; /* 0x15C */
425 #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_MASK 0x0000007F
426 #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_SHIFT 0
427
428 #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000
429 #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16
430
431 u32 mf_pci_id; /* 0x160 */
432 #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF
433 #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0
434
435 /* Controls the TX laser of the SFP+ module */
436 u32 sfp_ctrl; /* 0x164 */
437 #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
438 #define PORT_HW_CFG_TX_LASER_SHIFT 0
439 #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
440 #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
441 #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
442 #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
443 #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
444
445 /* Controls the fault module LED of the SFP+ */
446 #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
447 #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
448 #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
449 #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
450 #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
451 #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
452 #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
453
454 /* The output pin TX_DIS that controls the TX laser of the SFP+
455 module. Use the PIN_CFG_XXX defines on top */
456 u32 e3_sfp_ctrl; /* 0x168 */
457 #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF
458 #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0
459
460 /* The output pin for SFPP_TYPE which turns on the Fault module LED */
461 #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00
462 #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8
463
464 	/* The input pin MOD_ABS that indicates whether the SFP+ module is
465 	 present or not. Use the PIN_CFG_XXX defines on top */
466 #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000
467 #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16
468
469 	/* The output pin PWRDIS_SFP_X which disables the power of the SFP+
470 	 module. Use the PIN_CFG_XXX defines on top */
471 #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000
472 #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24
473
474 /*
475 * The input pin which signals module transmit fault. Use the
476 * PIN_CFG_XXX defines on top
477 */
478 u32 e3_cmn_pin_cfg; /* 0x16C */
479 #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF
480 #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0
481
482 	/* The output pin which resets the PHY. Use the PIN_CFG_XXX defines on
483 	 top */
484 #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00
485 #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8
486
487 /*
488 * The output pin which powers down the PHY. Use the PIN_CFG_XXX
489 * defines on top
490 */
491 #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000
492 #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16
493
494 	/* The output pin values BSC_SEL which select the I2C bus for this
495 	 port in the I2C mux */
496 #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000
497 #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000
498
499
500 /*
501 	 * The input pin I_FAULT which indicates that an over-current has
502 	 * occurred. Use the PIN_CFG_XXX defines on top
503 */
504 u32 e3_cmn_pin_cfg1; /* 0x170 */
505 #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF
506 #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0
507 u32 reserved0[7]; /* 0x174 */
508
509 u32 aeu_int_mask; /* 0x190 */
510
511 u32 media_type; /* 0x194 */
512 #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF
513 #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0
514
515 #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00
516 #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8
517
518 #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000
519 #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16
520
521 	/* 4 times 16 bits for all 4 lanes. In case an external PHY is
522 	 present (not direct mode), those values will not take effect on the
523 	 4 XGXS lanes. For some external PHYs (such as the 8706 and 8726)
524 	 the values will be used to configure the external PHY; in those
525 	 cases, not all 4 values are needed. */
526 u16 xgxs_config_rx[4]; /* 0x198 */
527 u16 xgxs_config_tx[4]; /* 0x1A0 */
528
529 /* For storing FCOE mac on shared memory */
530 u32 fcoe_fip_mac_upper;
531 #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff
532 #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0
533 u32 fcoe_fip_mac_lower;
534
535 u32 fcoe_wwn_port_name_upper;
536 u32 fcoe_wwn_port_name_lower;
537
538 u32 fcoe_wwn_node_name_upper;
539 u32 fcoe_wwn_node_name_lower;
540
541 u32 Reserved1[49]; /* 0x1C0 */
542
543 /* Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default),
544 84833 only */
545 u32 xgbt_phy_cfg; /* 0x284 */
546 #define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK 0x000000FF
547 #define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT 0
548
549 u32 default_cfg; /* 0x288 */
550 #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
551 #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
552 #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
553 #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
554 #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
555 #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
556
557 #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
558 #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
559 #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
560 #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
561 #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
562 #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
563
564 #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
565 #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
566 #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
567 #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
568 #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
569 #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
570
571 #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
572 #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
573 #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
574 #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
575 #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
576 #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
577
578 	/* When the KR link is required to be forced (which is not
579 	 KR-compliant), this parameter determines the trigger for it. When
580 	 GPIO is selected, a low input will force the speed. Currently the
581 	 default speed is 1G. In the future, it may be widened to select
582 	 the forced speed with another parameter. Note that when force-1G
583 	 is enabled, it overrides option 56: the Link Speed option. */
584 #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
585 #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
586 #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
587 #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
588 #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
589 #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
590 #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
591 #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
592 #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
593 #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
594 #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
595 #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
596 	/* Determines which GPIO is used to reset the external phy */
597 #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
598 #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
599 #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
600 #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
601 #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
602 #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
603 #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
604 #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
605 #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
606 #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
607 #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
608
609 /* Enable BAM on KR */
610 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
611 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
612 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
613 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
614
615 /* Enable Common Mode Sense */
616 #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
617 #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
618 #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
619 #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
620
621 /* Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */
622 #define PORT_HW_CFG_RJ45_PR_SWP_MASK 0x00400000
623 #define PORT_HW_CFG_RJ45_PR_SWP_SHIFT 22
624 #define PORT_HW_CFG_RJ45_PR_SWP_DISABLED 0x00000000
625 #define PORT_HW_CFG_RJ45_PR_SWP_ENABLED 0x00400000
626
627 /* Determine the Serdes electrical interface */
628 #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000
629 #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24
630 #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000
631 #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000
632 #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000
633 #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000
634 #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000
635 #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000
636
637
638 u32 speed_capability_mask2; /* 0x28C */
639 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
640 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
641 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001
642 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002
643 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004
644 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008
645 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010
646 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020
647 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040
648 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080
649
650 #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000
651 #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16
652 #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000
653 #define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000
654 #define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000
655 #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000
656 #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000
657 #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000
658 #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000
659 #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 0x00800000
660
661
662 /* In the case where two media types (e.g. copper and fiber) are
663 present and electrically active at the same time, PHY Selection
664 will determine which of the two PHYs will be designated as the
665 Active PHY and used for a connection to the network. */
666 u32 multi_phy_config; /* 0x290 */
667 #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007
668 #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0
669 #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000
670 #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001
671 #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002
672 #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
673 #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
674
675 /* When enabled, all second phy nvram parameters will be swapped
676 with the first phy parameters */
677 #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008
678 #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3
679 #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000
680 #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008
681
682
683 /* Address of the second external phy */
684 u32 external_phy_config2; /* 0x294 */
685 #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF
686 #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0
687
688 /* The second XGXS external PHY type */
689 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00
690 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8
691 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000
692 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100
693 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200
694 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300
695 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400
696 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500
697 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600
698 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700
699 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800
700 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900
701 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00
702 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00
703 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00
704 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00
705 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00
706 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00
707 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
708 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
709
710
711 /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as
712 8706, 8726 and 8727) not all 4 values are needed. */
713 u16 xgxs_config2_rx[4]; /* 0x296 */
714 u16 xgxs_config2_tx[4]; /* 0x2A0 */
715
716 u32 lane_config;
717 #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
718 #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0
719 /* AN and forced */
720 #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b
721 /* forced only */
722 #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4
723 /* forced only */
724 #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
725 /* forced only */
726 #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
727 #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff
728 #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
729 #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00
730 #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
731 #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000
732 #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14
733
734 /* Indicate whether to swap the external phy polarity */
735 #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000
736 #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000
737 #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000
738
739
740 u32 external_phy_config;
741 #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff
742 #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0
743
744 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00
745 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8
746 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000
747 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100
748 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200
749 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300
750 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400
751 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500
752 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600
753 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700
754 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800
755 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
756 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
757 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
758 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54640 0x00000c00
759 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
760 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00
761 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00
762 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00
763 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
764 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
765
766 #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000
767 #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16
768
769 #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000
770 #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24
771 #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000
772 #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000
773 #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000
774 #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000
775
776 u32 speed_capability_mask;
777 #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff
778 #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0
779 #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001
780 #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002
781 #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004
782 #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008
783 #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010
784 #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020
785 #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040
786 #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080
787 #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000
788
789 #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000
790 #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16
791 #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000
792 #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000
793 #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000
794 #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000
795 #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000
796 #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000
797 #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000
798 #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000
799 #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000
800
801 /* A place to hold the original MAC address as a backup */
802 u32 backup_mac_upper; /* 0x2B4 */
803 u32 backup_mac_lower; /* 0x2B8 */
804
805};
806
807
808/****************************************************************************
809 * Shared Feature configuration *
810 ****************************************************************************/
811struct shared_feat_cfg { /* NVRAM Offset */
812
813 u32 config; /* 0x450 */
814 #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001
815
816 /* Use NVRAM values instead of HW default values */
817 #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \
818 0x00000002
819 #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \
820 0x00000000
821 #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \
822 0x00000002
823
824 #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008
825 #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000
826 #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008
827
828 #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030
829 #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4
830
831 	/* Override the OTP back to single-function mode. When using GPIO,
832 	 high means SF only, low means according to the CLP configuration */
833 #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
834 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
835 #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
836 #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
837 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
838 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
839
840 /* The interval in seconds between sending LLDP packets. Set to zero
841 to disable the feature */
842 #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00ff0000
843 #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16
844
845 /* The assigned device type ID for LLDP usage */
846 #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xff000000
847 #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24
848
849};
850
851
852/****************************************************************************
853 * Port Feature configuration *
854 ****************************************************************************/
855struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
856
857 u32 config;
858 #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f
859 #define PORT_FEATURE_BAR1_SIZE_SHIFT 0
860 #define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000
861 #define PORT_FEATURE_BAR1_SIZE_64K 0x00000001
862 #define PORT_FEATURE_BAR1_SIZE_128K 0x00000002
863 #define PORT_FEATURE_BAR1_SIZE_256K 0x00000003
864 #define PORT_FEATURE_BAR1_SIZE_512K 0x00000004
865 #define PORT_FEATURE_BAR1_SIZE_1M 0x00000005
866 #define PORT_FEATURE_BAR1_SIZE_2M 0x00000006
867 #define PORT_FEATURE_BAR1_SIZE_4M 0x00000007
868 #define PORT_FEATURE_BAR1_SIZE_8M 0x00000008
869 #define PORT_FEATURE_BAR1_SIZE_16M 0x00000009
870 #define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a
871 #define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b
872 #define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c
873 #define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d
874 #define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e
875 #define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f
876 #define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0
877 #define PORT_FEATURE_BAR2_SIZE_SHIFT 4
878 #define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000
879 #define PORT_FEATURE_BAR2_SIZE_64K 0x00000010
880 #define PORT_FEATURE_BAR2_SIZE_128K 0x00000020
881 #define PORT_FEATURE_BAR2_SIZE_256K 0x00000030
882 #define PORT_FEATURE_BAR2_SIZE_512K 0x00000040
883 #define PORT_FEATURE_BAR2_SIZE_1M 0x00000050
884 #define PORT_FEATURE_BAR2_SIZE_2M 0x00000060
885 #define PORT_FEATURE_BAR2_SIZE_4M 0x00000070
886 #define PORT_FEATURE_BAR2_SIZE_8M 0x00000080
887 #define PORT_FEATURE_BAR2_SIZE_16M 0x00000090
888 #define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0
889 #define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0
890 #define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0
891 #define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0
892 #define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0
893 #define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0
894
895 #define PORT_FEAT_CFG_DCBX_MASK 0x00000100
896 #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
897 #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
898
899 #define PORT_FEAT_CFG_AUTOGREEN_MASK 0x00000200
900 #define PORT_FEAT_CFG_AUTOGREEN_SHIFT 9
901 #define PORT_FEAT_CFG_AUTOGREEN_DISABLED 0x00000000
902 #define PORT_FEAT_CFG_AUTOGREEN_ENABLED 0x00000200
903
904 #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
905 #define PORT_FEATURE_EN_SIZE_SHIFT 24
906 #define PORT_FEATURE_WOL_ENABLED 0x01000000
907 #define PORT_FEATURE_MBA_ENABLED 0x02000000
908 #define PORT_FEATURE_MFW_ENABLED 0x04000000
909
910 /* Advertise expansion ROM even if MBA is disabled */
911 #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000
912 #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000
913 #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000
914
915 /* Check the optic vendor via i2c against a list of approved modules
916 in a separate nvram image */
917 #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xe0000000
918 #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29
919 #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \
920 0x00000000
921 #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \
922 0x20000000
923 #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000
924 #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000
925
926 u32 wol_config;
927 	/* Default is used when the driver is set to "auto" mode */
928 #define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003
929 #define PORT_FEATURE_WOL_DEFAULT_SHIFT 0
930 #define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000
931 #define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001
932 #define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002
933 #define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003
934 #define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004
935 #define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008
936 #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010
937
938 u32 mba_config;
939 #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000007
940 #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0
941 #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000
942 #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001
943 #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002
944 #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003
945 #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004
946 #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007
947
948 #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038
949 #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3
950
951 #define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100
952 #define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200
953 #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400
954 #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800
955 #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000
956 #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800
957 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000
958 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12
959 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000
960 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000
961 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000
962 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000
963 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000
964 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000
965 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000
966 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000
967 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000
968 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000
969 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000
970 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000
971 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000
972 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000
973 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000
974 #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000
975 #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000
976 #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20
977 #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000
978 #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24
979 #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000
980 #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000
981 #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000
982 #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000
983 #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000
984 #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26
985 #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000
986 #define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000
987 #define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000
988 #define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000
989 #define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000
990 #define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000
991 #define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000
992 #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000
993 #define PORT_FEATURE_MBA_LINK_SPEED_20GBPS 0x20000000
994 u32 bmc_config;
995 #define PORT_FEATURE_BMC_LINK_OVERRIDE_MASK 0x00000001
996 #define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000
997 #define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001
998
999 u32 mba_vlan_cfg;
1000 #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff
1001 #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0
1002 #define PORT_FEATURE_MBA_VLAN_EN 0x00010000
1003
1004 u32 resource_cfg;
1005 #define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001
1006 #define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002
1007 #define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004
1008 #define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008
1009 #define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010
1010
1011 u32 smbus_config;
1012 #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe
1013 #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1
1014
1015 u32 vf_config;
1016 #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000f
1017 #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0
1018 #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000
1019 #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001
1020 #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002
1021 #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003
1022 #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004
1023 #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005
1024 #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006
1025 #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007
1026 #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008
1027 #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009
1028 #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a
1029 #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b
1030 #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c
1031 #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d
1032 #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e
1033 #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f
1034
1035 u32 link_config; /* Used as HW defaults for the driver */
1036 #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000
1037 #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24
1038 /* (forced) low speed switch (< 10G) */
1039 #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000
1040 /* (forced) high speed switch (>= 10G) */
1041 #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000
1042 #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000
1043 #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000
1044
1045 #define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000
1046 #define PORT_FEATURE_LINK_SPEED_SHIFT 16
1047 #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000
1048 #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000
1049 #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000
1050 #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000
1051 #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000
1052 #define PORT_FEATURE_LINK_SPEED_1G 0x00050000
1053 #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000
1054 #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000
1055 #define PORT_FEATURE_LINK_SPEED_20G 0x00080000
1056
1057 #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700
1058 #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8
1059 #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000
1060 #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100
1061 #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200
1062 #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300
1063 #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
1064
1065 /* The default for MCP link configuration,
1066 uses the same defines as link_config */
1067 u32 mfw_wol_link_cfg;
1068
1069 /* The default for the driver of the second external phy,
1070 uses the same defines as link_config */
1071 u32 link_config2; /* 0x47C */
1072
1073 /* The default for MCP of the second external phy,
1074 uses the same defines as link_config */
1075 u32 mfw_wol_link_cfg2; /* 0x480 */
1076
1077 u32 Reserved2[17]; /* 0x484 */
1078
1079};
1080
1081
1082/****************************************************************************
1083 * Device Information *
1084 ****************************************************************************/
1085struct shm_dev_info { /* size */
1086
1087 u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */
1088
1089 struct shared_hw_cfg shared_hw_config; /* 40 */
1090
1091 struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */
1092
1093 struct shared_feat_cfg shared_feature_config; /* 4 */
1094
1095 struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */
1096
1097};
1098
1099
1100#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
1101 #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
1102#endif
1103
1104#define FUNC_0 0
1105#define FUNC_1 1
1106#define FUNC_2 2
1107#define FUNC_3 3
1108#define FUNC_4 4
1109#define FUNC_5 5
1110#define FUNC_6 6
1111#define FUNC_7 7
1112#define E1_FUNC_MAX 2
1113#define E1H_FUNC_MAX 8
1114#define E2_FUNC_MAX 4 /* per path */
1115
1116#define VN_0 0
1117#define VN_1 1
1118#define VN_2 2
1119#define VN_3 3
1120#define E1VN_MAX 1
1121#define E1HVN_MAX 4
1122
1123#define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */
1124/* This value (in milliseconds) determines the frequency of the driver
1125 * issuing the PULSE message code. The firmware monitors this periodic
1126 * pulse to determine when to switch to an OS-absent mode. */
1127#define DRV_PULSE_PERIOD_MS 250
1128
1129/* This value (in milliseconds) determines how long the driver should
1130 * wait for an acknowledgement from the firmware before timing out. Once
1131 * this timeout expires, the driver will assume there is no firmware
1132 * running and there won't be any firmware-driver synchronization during a
1133 * driver reset. */
1134#define FW_ACK_TIME_OUT_MS 5000
1135
1136#define FW_ACK_POLL_TIME_MS 1
1137
1138#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
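/* Sketch of the intended use of the two values above (hypothetical
 * fw_acked() predicate; msleep() as in the kernel):
 *
 *	for (cnt = 0; cnt < FW_ACK_NUM_OF_POLL; cnt++) {
 *		if (fw_acked())
 *			break;
 *		msleep(FW_ACK_POLL_TIME_MS);
 *	}
 */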
1139
1140/* LED Blink rate that will achieve ~15.9Hz */
1141#define LED_BLINK_RATE_VAL 480
1142
1143/****************************************************************************
1144 * Driver <-> FW Mailbox *
1145 ****************************************************************************/
1146struct drv_port_mb {
1147
1148 u32 link_status;
1149 /* Driver should update this field on any link change event */
1150
1151 #define LINK_STATUS_LINK_FLAG_MASK 0x00000001
1152 #define LINK_STATUS_LINK_UP 0x00000001
1153 #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
1154 #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1)
1155 #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1)
1156 #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1)
1157 #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1)
1158 #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1)
1159 #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1)
1160 #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1)
1161 #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1)
1162 #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1)
1163 #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1)
1164 #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1)
1165 #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1)
1166 #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1)
1167 #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1)
1168 #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1)
1169 #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1)
1170
1171 #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020
1172 #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
1173
1174 #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
1175 #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080
1176 #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
1177
1178 #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
1179 #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
1180 #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800
1181 #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000
1182 #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000
1183 #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000
1184 #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000
1185
1186 #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000
1187 #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000
1188
1189 #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000
1190 #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000
1191
1192 #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
1193 #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18)
1194 #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18)
1195 #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18)
1196 #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18)
1197
1198 #define LINK_STATUS_SERDES_LINK 0x00100000
1199
1200 #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000
1201 #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000
1202 #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000
1203 #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000
1204
1205 #define LINK_STATUS_PFC_ENABLED 0x20000000
1206
1207 #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
1208
1209 u32 port_stx;
1210
1211 u32 stat_nig_timer;
1212
1213 /* MCP firmware does not use this field */
1214 u32 ext_phy_fw_version;
1215
1216};
1217
1218
1219struct drv_func_mb {
1220
1221 u32 drv_mb_header;
1222 #define DRV_MSG_CODE_MASK 0xffff0000
1223 #define DRV_MSG_CODE_LOAD_REQ 0x10000000
1224 #define DRV_MSG_CODE_LOAD_DONE 0x11000000
1225 #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
1226 #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
1227 #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
1228 #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
1229 #define DRV_MSG_CODE_DCC_OK 0x30000000
1230 #define DRV_MSG_CODE_DCC_FAILURE 0x31000000
1231 #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
1232 #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
1233 #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
1234 #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
1235 #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
1236 #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
1237 #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
1238 /*
1239 * The optic module verification command requires bootcode
1240	 * v5.0.6 or later; the specific optic module verification command
1241 * requires bootcode v5.2.12 or later
1242 */
1243 #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
1244 #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
1245 #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
1246 #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
1247 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
1248
1249 #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
1250 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
1251
1252 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
1253
1254 #define DRV_MSG_CODE_SET_MF_BW 0xe0000000
1255 #define REQ_BC_VER_4_SET_MF_BW 0x00060202
1256 #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
1257
1258 #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000
1259
1260 #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
1261 #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
1262 #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
1263 #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
1264
1265 #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
1266
1267 u32 drv_mb_param;
1268 #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000
1269 #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000
1270
1271 u32 fw_mb_header;
1272 #define FW_MSG_CODE_MASK 0xffff0000
1273 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
1274 #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
1275 #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
1276 /* Load common chip is supported from bc 6.0.0 */
1277 #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000
1278 #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000
1279
1280 #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
1281 #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
1282 #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
1283 #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000
1284 #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000
1285 #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
1286 #define FW_MSG_CODE_DCC_DONE 0x30100000
1287 #define FW_MSG_CODE_LLDP_DONE 0x40100000
1288 #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000
1289 #define FW_MSG_CODE_DIAG_REFUSE 0x50200000
1290 #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000
1291 #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000
1292 #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000
1293 #define FW_MSG_CODE_GET_KEY_DONE 0x80100000
1294 #define FW_MSG_CODE_NO_KEY 0x80f00000
1295 #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000
1296 #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000
1297 #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000
1298 #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000
1299 #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000
1300 #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000
1301 #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000
1302 #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
1303 #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
1304 #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
1305
1306 #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
1307 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
1308
1309 #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000
1310
1311 #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000
1312 #define FW_MSG_CODE_LIC_RESPONSE 0xff020000
1313 #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000
1314 #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
1315
1316 #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
1317
1318 u32 fw_mb_param;
1319
1320 u32 drv_pulse_mb;
1321 #define DRV_PULSE_SEQ_MASK 0x00007fff
1322 #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
1323 /*
1324 * The system time is in the format of
1325 * (year-2001)*12*32 + month*32 + day.
1326 */
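	/* Worked example: 13-Jun-2011 encodes as
	 * (2011-2001)*12*32 + 6*32 + 13 = 4045 (0x0fcd). */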
1327 #define DRV_PULSE_ALWAYS_ALIVE 0x00008000
1328 /*
1329	 * Indicates to the firmware not to go into
1330	 * OS-absent mode when it is not getting a driver pulse.
1331	 * This is used for debugging as well as for PXE (MBA).
1332 */
1333
1334 u32 mcp_pulse_mb;
1335 #define MCP_PULSE_SEQ_MASK 0x00007fff
1336 #define MCP_PULSE_ALWAYS_ALIVE 0x00008000
1337 /* Indicates to the driver not to assert due to lack
1338 * of MCP response */
1339 #define MCP_EVENT_MASK 0xffff0000
1340 #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
1341
1342 u32 iscsi_boot_signature;
1343 u32 iscsi_boot_block_offset;
1344
1345 u32 drv_status;
1346 #define DRV_STATUS_PMF 0x00000001
1347 #define DRV_STATUS_VF_DISABLED 0x00000002
1348 #define DRV_STATUS_SET_MF_BW 0x00000004
1349 #define DRV_STATUS_LINK_EVENT 0x00000008
1350
1351 #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
1352 #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
1353 #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200
1354 #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400
1355 #define DRV_STATUS_DCC_RESERVED1 0x00000800
1356 #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
1357 #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
1358
1359 #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
1360 #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
1361
1362 u32 virt_mac_upper;
1363 #define VIRT_MAC_SIGN_MASK 0xffff0000
1364 #define VIRT_MAC_SIGNATURE 0x564d0000
1365 u32 virt_mac_lower;
1366
1367};
1368
1369
1370/****************************************************************************
1371 * Management firmware state *
1372 ****************************************************************************/
1373/* Allocate 440 bytes for management firmware */
1374#define MGMTFW_STATE_WORD_SIZE 110
1375
1376struct mgmtfw_state {
1377 u32 opaque[MGMTFW_STATE_WORD_SIZE];
1378};
1379
1380
1381/****************************************************************************
1382 * Multi-Function configuration *
1383 ****************************************************************************/
1384struct shared_mf_cfg {
1385
1386 u32 clp_mb;
1387 #define SHARED_MF_CLP_SET_DEFAULT 0x00000000
1388 /* set by CLP */
1389 #define SHARED_MF_CLP_EXIT 0x00000001
1390 /* set by MCP */
1391 #define SHARED_MF_CLP_EXIT_DONE 0x00010000
1392
1393};
1394
1395struct port_mf_cfg {
1396
1397 u32 dynamic_cfg; /* device control channel */
1398 #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
1399 #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0
1400 #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK
1401
1402 u32 reserved[3];
1403
1404};
1405
1406struct func_mf_cfg {
1407
1408 u32 config;
1409 /* E/R/I/D */
1410 /* function 0 of each port cannot be hidden */
1411 #define FUNC_MF_CFG_FUNC_HIDE 0x00000001
1412
1413 #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006
1414 #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000
1415 #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002
1416 #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004
1417 #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006
1418 #define FUNC_MF_CFG_PROTOCOL_DEFAULT \
1419 FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
1420
1421 #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008
1422 #define FUNC_MF_CFG_FUNC_DELETED 0x00000010
1423
1424 /* PRI */
1425 /* 0 - low priority, 3 - high priority */
1426 #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300
1427 #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8
1428 #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000
1429
1430 /* MINBW, MAXBW */
1431	/* value range 0..100, in increments of 100Mbps */
1432 #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000
1433 #define FUNC_MF_CFG_MIN_BW_SHIFT 16
1434 #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
1435 #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000
1436 #define FUNC_MF_CFG_MAX_BW_SHIFT 24
1437 #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000
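	/* e.g. the default max, 0x64000000, is 0x64 = 100 units,
	 i.e. 100 * 100Mbps = 10Gbps */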
1438
1439 u32 mac_upper; /* MAC */
1440 #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
1441 #define FUNC_MF_CFG_UPPERMAC_SHIFT 0
1442 #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
1443 u32 mac_lower;
1444 #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
1445
1446 u32 e1hov_tag; /* VNI */
1447 #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
1448 #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
1449 #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
1450
1451 u32 reserved[2];
1452};
1453
1454/* This structure is not applicable and should not be accessed on 57711 */
1455struct func_ext_cfg {
1456 u32 func_cfg;
1457 #define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF
1458 #define MACP_FUNC_CFG_FLAGS_SHIFT 0
1459 #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
1460 #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
1461 #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
1462 #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
1463
1464 u32 iscsi_mac_addr_upper;
1465 u32 iscsi_mac_addr_lower;
1466
1467 u32 fcoe_mac_addr_upper;
1468 u32 fcoe_mac_addr_lower;
1469
1470 u32 fcoe_wwn_port_name_upper;
1471 u32 fcoe_wwn_port_name_lower;
1472
1473 u32 fcoe_wwn_node_name_upper;
1474 u32 fcoe_wwn_node_name_lower;
1475
1476 u32 preserve_data;
1477 #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
1478 #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
1479 #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
1480 #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
1481 #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
1482 #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5)
1483};
1484
1485struct mf_cfg {
1486
1487 struct shared_mf_cfg shared_mf_config; /* 0x4 */
1488 struct port_mf_cfg port_mf_config[PORT_MAX]; /* 0x10 * 2 = 0x20 */
1489 /* for all chips, there are 8 mf functions */
1490 struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */
1491 /*
1492 * Extended configuration per function - this array does not exist and
1493 * should not be accessed on 57711
1494 */
1495 struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/
1496}; /* 0x224 */
1497
1498/****************************************************************************
1499 * Shared Memory Region *
1500 ****************************************************************************/
1501struct shmem_region { /* SharedMem Offset (size) */
1502
1503 u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */
1504 #define SHR_MEM_FORMAT_REV_MASK 0xff000000
1505 #define SHR_MEM_FORMAT_REV_ID ('A'<<24)
1506 /* validity bits */
1507 #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000
1508 #define SHR_MEM_VALIDITY_MB 0x00200000
1509 #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000
1510 #define SHR_MEM_VALIDITY_RESERVED 0x00000007
1511 /* One licensing bit should be set */
1512 #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
1513 #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
1514 #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
1515 #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
1516 /* Active MFW */
1517 #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
1518 #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
1519 #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040
1520 #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080
1521 #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0
1522 #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
1523
1524 struct shm_dev_info dev_info; /* 0x8 (0x438) */
1525
1526 struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
1527
1528 /* FW information (for internal FW use) */
1529 u32 fw_info_fio_offset; /* 0x4a8 (0x4) */
1530 struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
1531
1532 struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
1533
1534#ifdef BMAPI
1535 /* This is a variable length array */
1536	/* the number of functions depends on the chip type */
1537 struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
1538#else
1539	/* the number of functions depends on the chip type */
1540 struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
1541#endif /* BMAPI */
1542
1543}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */
1544
1545/****************************************************************************
1546 * Shared Memory 2 Region *
1547 ****************************************************************************/
1548/* The fw_flr_ack is actually built in the following way: */
1549/* 8 bit: PF ack */
1550/* 64 bit: VF ack */
1551/* 8 bit: iov_dis_ack */
1552/* In order to maintain endianness in the mailbox hsi, we want to keep */
1553/* using u32. The fw must have the VF right after the PF since this is */
1554/* how it accesses arrays (it always expects the VF to reside after the */
1555/* PF, which makes the calculation much easier for it). */
1556/* In order to satisfy both limitations, and keep the struct small, the */
1557/* code will abuse the structure defined here to achieve the actual */
1558/* partition above */
1559/****************************************************************************/
1560struct fw_flr_ack {
1561 u32 pf_ack;
1562 u32 vf_ack[1];
1563 u32 iov_dis_ack;
1564};
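/* Per the partition described above (8-bit PF ack followed by a 64-bit
 * VF ack bitmap), VF n's ack presumably lives at absolute bit (8 + n)
 * of the structure, i.e. (sketch; "dword" is a hypothetical u32 view):
 *
 *	u32 word = dword[(8 + n) / 32];
 *	int acked = (word >> ((8 + n) % 32)) & 1;
 */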
1565
1566struct fw_flr_mb {
1567 u32 aggint;
1568 u32 opgen_addr;
1569 struct fw_flr_ack ack;
1570};
1571
1572/**** SUPPORT FOR SHMEM ARRAYS ***
1573 * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
1574 * define arrays with storage types smaller than unsigned dwords.
1575 * The macros below add generic support for SHMEM arrays with numeric elements
1576 * that can span 2, 4, 8 or 16 bits. The underlying array type is a 32 bit
1577 * dword array with individual bit-field elements accessed using shifts and
1578 * masks.
1579 */
1580
1581/* eb is the bitwidth of a single element */
1582#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
1583#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
1584
1585/* the bit-position macro allows the user to flip the order of the array's
1586 * elements on a per byte or word boundary.
1587 *
1588 * example: an array with 8 entries, each 4 bits wide. This array will fit
1589 * into a single dword. The diagrams below show the array order of the nibbles.
1590 *
1591 * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
1592 *
1593 * | | | |
1594 * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
1595 * | | | |
1596 *
1597 * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
1598 *
1599 * | | | |
1600 * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
1601 * | | | |
1602 *
1603 * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
1604 *
1605 * | | | |
1606 * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
1607 * | | | |
1608 */
1609#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
1610 ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
1611 (((i)%((fb)/(eb))) * (eb)))
1612
1613#define SHMEM_ARRAY_GET(a, i, eb, fb) \
1614 ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
1615 SHMEM_ARRAY_MASK(eb))
1616
1617#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
1618do { \
1619 a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
1620 SHMEM_ARRAY_BITPOS(i, eb, fb)); \
1621 a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
1622 SHMEM_ARRAY_BITPOS(i, eb, fb)); \
1623} while (0)
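/* Usage sketch: with 4-bit elements flipped per byte (eb=4, fb=8), a
 * dword a[0] = 0x12345678 yields element 0 = 0x2 and element 1 = 0x1
 * (elements 0 and 1 share the most significant byte, swapped within it):
 *
 *	u32 a[1] = { 0x12345678 };
 *	u32 e0 = SHMEM_ARRAY_GET(a, 0, 4, 8);	(== 0x2)
 *	u32 e1 = SHMEM_ARRAY_GET(a, 1, 4, 8);	(== 0x1)
 */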
1624
1625
1626/****START OF DCBX STRUCTURES DECLARATIONS****/
1627#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8
1628#define DCBX_PRI_PG_BITWIDTH 4
1629#define DCBX_PRI_PG_FBITS 8
1630#define DCBX_PRI_PG_GET(a, i) \
1631 SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS)
1632#define DCBX_PRI_PG_SET(a, i, val) \
1633 SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val)
1634#define DCBX_MAX_NUM_PG_BW_ENTRIES 8
1635#define DCBX_BW_PG_BITWIDTH 8
1636#define DCBX_PG_BW_GET(a, i) \
1637 SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH)
1638#define DCBX_PG_BW_SET(a, i, val) \
1639 SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val)
1640#define DCBX_STRICT_PRI_PG 15
1641#define DCBX_MAX_APP_PROTOCOL 16
1642#define FCOE_APP_IDX 0
1643#define ISCSI_APP_IDX 1
1644#define PREDEFINED_APP_IDX_MAX 2
1645
1646
1647/* Big/Little endian have the same representation. */
1648struct dcbx_ets_feature {
1649 /*
1650 * For Admin MIB - is this feature supported by the
1651 * driver | For Local MIB - should this feature be enabled.
1652 */
1653 u32 enabled;
1654 u32 pg_bw_tbl[2];
1655 u32 pri_pg_tbl[1];
1656};
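/* Sketch: the tables above are read with the accessors defined earlier
 * ("ets" is a hypothetical pointer to this struct):
 *
 *	u8 pg = DCBX_PRI_PG_GET(ets->pri_pg_tbl, pri);
 *	u8 bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg);
 */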
1657
1658/* Driver structure in LE */
1659struct dcbx_pfc_feature {
1660#ifdef __BIG_ENDIAN
1661 u8 pri_en_bitmap;
1662 #define DCBX_PFC_PRI_0 0x01
1663 #define DCBX_PFC_PRI_1 0x02
1664 #define DCBX_PFC_PRI_2 0x04
1665 #define DCBX_PFC_PRI_3 0x08
1666 #define DCBX_PFC_PRI_4 0x10
1667 #define DCBX_PFC_PRI_5 0x20
1668 #define DCBX_PFC_PRI_6 0x40
1669 #define DCBX_PFC_PRI_7 0x80
1670 u8 pfc_caps;
1671 u8 reserved;
1672 u8 enabled;
1673#elif defined(__LITTLE_ENDIAN)
1674 u8 enabled;
1675 u8 reserved;
1676 u8 pfc_caps;
1677 u8 pri_en_bitmap;
1678 #define DCBX_PFC_PRI_0 0x01
1679 #define DCBX_PFC_PRI_1 0x02
1680 #define DCBX_PFC_PRI_2 0x04
1681 #define DCBX_PFC_PRI_3 0x08
1682 #define DCBX_PFC_PRI_4 0x10
1683 #define DCBX_PFC_PRI_5 0x20
1684 #define DCBX_PFC_PRI_6 0x40
1685 #define DCBX_PFC_PRI_7 0x80
1686#endif
1687};
1688
1689struct dcbx_app_priority_entry {
1690#ifdef __BIG_ENDIAN
1691 u16 app_id;
1692 u8 pri_bitmap;
1693 u8 appBitfield;
1694 #define DCBX_APP_ENTRY_VALID 0x01
1695 #define DCBX_APP_ENTRY_SF_MASK 0x30
1696 #define DCBX_APP_ENTRY_SF_SHIFT 4
1697 #define DCBX_APP_SF_ETH_TYPE 0x10
1698 #define DCBX_APP_SF_PORT 0x20
1699#elif defined(__LITTLE_ENDIAN)
1700 u8 appBitfield;
1701 #define DCBX_APP_ENTRY_VALID 0x01
1702 #define DCBX_APP_ENTRY_SF_MASK 0x30
1703 #define DCBX_APP_ENTRY_SF_SHIFT 4
1704 #define DCBX_APP_SF_ETH_TYPE 0x10
1705 #define DCBX_APP_SF_PORT 0x20
1706 u8 pri_bitmap;
1707 u16 app_id;
1708#endif
1709};
1710
1711
1712/* FW structure in BE */
1713struct dcbx_app_priority_feature {
1714#ifdef __BIG_ENDIAN
1715 u8 reserved;
1716 u8 default_pri;
1717 u8 tc_supported;
1718 u8 enabled;
1719#elif defined(__LITTLE_ENDIAN)
1720 u8 enabled;
1721 u8 tc_supported;
1722 u8 default_pri;
1723 u8 reserved;
1724#endif
1725 struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
1726};
1727
1728/* FW structure in BE */
1729struct dcbx_features {
1730 /* PG feature */
1731 struct dcbx_ets_feature ets;
1732 /* PFC feature */
1733 struct dcbx_pfc_feature pfc;
1734 /* APP feature */
1735 struct dcbx_app_priority_feature app;
1736};
1737
1738/* LLDP protocol parameters */
1739/* FW structure in BE */
1740struct lldp_params {
1741#ifdef __BIG_ENDIAN
1742 u8 msg_fast_tx_interval;
1743 u8 msg_tx_hold;
1744 u8 msg_tx_interval;
1745 u8 admin_status;
1746 #define LLDP_TX_ONLY 0x01
1747 #define LLDP_RX_ONLY 0x02
1748 #define LLDP_TX_RX 0x03
1749 #define LLDP_DISABLED 0x04
1750 u8 reserved1;
1751 u8 tx_fast;
1752 u8 tx_crd_max;
1753 u8 tx_crd;
1754#elif defined(__LITTLE_ENDIAN)
1755 u8 admin_status;
1756 #define LLDP_TX_ONLY 0x01
1757 #define LLDP_RX_ONLY 0x02
1758 #define LLDP_TX_RX 0x03
1759 #define LLDP_DISABLED 0x04
1760 u8 msg_tx_interval;
1761 u8 msg_tx_hold;
1762 u8 msg_fast_tx_interval;
1763 u8 tx_crd;
1764 u8 tx_crd_max;
1765 u8 tx_fast;
1766 u8 reserved1;
1767#endif
1768 #define REM_CHASSIS_ID_STAT_LEN 4
1769 #define REM_PORT_ID_STAT_LEN 4
1770 /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
1771 u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
1772 /* Holds remote Port ID TLV header, subtype and 9B of payload. */
1773 u32 peer_port_id[REM_PORT_ID_STAT_LEN];
1774};
1775
1776struct lldp_dcbx_stat {
1777 #define LOCAL_CHASSIS_ID_STAT_LEN 2
1778 #define LOCAL_PORT_ID_STAT_LEN 2
1779 /* Holds local Chassis ID 8B payload of constant subtype 4. */
1780 u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
1781 /* Holds local Port ID 8B payload of constant subtype 3. */
1782 u32 local_port_id[LOCAL_PORT_ID_STAT_LEN];
1783 /* Number of DCBX frames transmitted. */
1784 u32 num_tx_dcbx_pkts;
1785 /* Number of DCBX frames received. */
1786 u32 num_rx_dcbx_pkts;
1787};
1788
1789/* ADMIN MIB - DCBX local machine default configuration. */
1790struct lldp_admin_mib {
1791 u32 ver_cfg_flags;
1792 #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001
1793 #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002
1794 #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004
1795 #define DCBX_ETS_RECO_TX_ENABLED 0x00000008
1796 #define DCBX_ETS_RECO_VALID 0x00000010
1797 #define DCBX_ETS_WILLING 0x00000020
1798 #define DCBX_PFC_WILLING 0x00000040
1799 #define DCBX_APP_WILLING 0x00000080
1800 #define DCBX_VERSION_CEE 0x00000100
1801 #define DCBX_VERSION_IEEE 0x00000200
1802 #define DCBX_DCBX_ENABLED 0x00000400
1803 #define DCBX_CEE_VERSION_MASK 0x0000f000
1804 #define DCBX_CEE_VERSION_SHIFT 12
1805 #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000
1806 #define DCBX_CEE_MAX_VERSION_SHIFT 16
1807 struct dcbx_features features;
1808};
1809
1810/* REMOTE MIB - remote machine DCBX configuration. */
1811struct lldp_remote_mib {
1812 u32 prefix_seq_num;
1813 u32 flags;
1814 #define DCBX_ETS_TLV_RX 0x00000001
1815 #define DCBX_PFC_TLV_RX 0x00000002
1816 #define DCBX_APP_TLV_RX 0x00000004
1817 #define DCBX_ETS_RX_ERROR 0x00000010
1818 #define DCBX_PFC_RX_ERROR 0x00000020
1819 #define DCBX_APP_RX_ERROR 0x00000040
1820 #define DCBX_ETS_REM_WILLING 0x00000100
1821 #define DCBX_PFC_REM_WILLING 0x00000200
1822 #define DCBX_APP_REM_WILLING 0x00000400
1823 #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000
1824 #define DCBX_REMOTE_MIB_VALID 0x00002000
1825 struct dcbx_features features;
1826 u32 suffix_seq_num;
1827};
1828
1829/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */
1830struct lldp_local_mib {
1831 u32 prefix_seq_num;
1832	/* Indicates if there is a mismatch with the negotiation results. */
1833 u32 error;
1834 #define DCBX_LOCAL_ETS_ERROR 0x00000001
1835 #define DCBX_LOCAL_PFC_ERROR 0x00000002
1836 #define DCBX_LOCAL_APP_ERROR 0x00000004
1837 #define DCBX_LOCAL_PFC_MISMATCH 0x00000010
1838 #define DCBX_LOCAL_APP_MISMATCH 0x00000020
1839 #define DCBX_REMOTE_MIB_ERROR 0x00000040
1840 struct dcbx_features features;
1841 u32 suffix_seq_num;
1842};
1843/***END OF DCBX STRUCTURES DECLARATIONS***/
1844
1845struct ncsi_oem_fcoe_features {
1846 u32 fcoe_features1;
1847 #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
1848 #define FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET 0
1849
1850 #define FCOE_FEATURES1_LOGINS_PER_PORT_MASK 0xFFFF0000
1851 #define FCOE_FEATURES1_LOGINS_PER_PORT_OFFSET 16
1852
1853 u32 fcoe_features2;
1854 #define FCOE_FEATURES2_EXCHANGES_MASK 0x0000FFFF
1855 #define FCOE_FEATURES2_EXCHANGES_OFFSET 0
1856
1857 #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_MASK 0xFFFF0000
1858 #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_OFFSET 16
1859
1860 u32 fcoe_features3;
1861 #define FCOE_FEATURES3_TARGETS_SUPPORTED_MASK 0x0000FFFF
1862 #define FCOE_FEATURES3_TARGETS_SUPPORTED_OFFSET 0
1863
1864 #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_MASK 0xFFFF0000
1865 #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_OFFSET 16
1866
1867 u32 fcoe_features4;
1868 #define FCOE_FEATURES4_FEATURE_SETTINGS_MASK 0x0000000F
1869 #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0
1870};
1871
1872struct ncsi_oem_data {
1873 u32 driver_version[4];
1874 struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
1875};
1876
1877struct shmem2_region {
1878
1879 u32 size; /* 0x0000 */
1880
1881 u32 dcc_support; /* 0x0004 */
1882 #define SHMEM_DCC_SUPPORT_NONE 0x00000000
1883 #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001
1884 #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004
1885 #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008
1886 #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
1887 #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
1888
1889 u32 ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */
1890 /*
1891 * For backwards compatibility, if the mf_cfg_addr does not exist
1892 * (the size field is smaller than 0xc) the mf_cfg resides at the
1893 * end of struct shmem_region
1894 */
1895 u32 mf_cfg_addr; /* 0x0010 */
1896 #define SHMEM_MF_CFG_ADDR_NONE 0x00000000
1897
1898 struct fw_flr_mb flr_mb; /* 0x0014 */
1899 u32 dcbx_lldp_params_offset; /* 0x0028 */
1900 #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
1901 u32 dcbx_neg_res_offset; /* 0x002c */
1902 #define SHMEM_DCBX_NEG_RES_NONE 0x00000000
1903 u32 dcbx_remote_mib_offset; /* 0x0030 */
1904 #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
1905 /*
1906 * The other_shmemX_base_addr fields hold the other path's shmem
1907 * addresses, required for example during common phy init, or for
1908 * path1 to find the mcp debug trace, which is located at an offset
1909 * from path0's shmem
1910 */
1911 u32 other_shmem_base_addr; /* 0x0034 */
1912 u32 other_shmem2_base_addr; /* 0x0038 */
1913 /*
1914 * mcp_vf_disabled is set by the MCP to indicate to the driver which
1915 * VFs were disabled/FLRed
1916 */
1917 u32 mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */
1918
1919 /*
1920 * drv_ack_vf_disabled is set by the PF driver to acknowledge the
1921 * disabled VFs it has handled
1922 */
1923 u32 drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */
1924
1925 u32 dcbx_lldp_dcbx_stat_offset; /* 0x0064 */
1926 #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
1927
1928 /*
1929 * The edebug_driver_if field is used to transfer messages between the
1930 * edebug app and the driver through shmem2.
1931 *
1932 * message format:
1933 * bits 0-2 - function number / instance of driver to perform request
1934 * bits 3-5 - op code / is_ack?
1935 * bits 6-63 - data
1936 */
1937 u32 edebug_driver_if[2]; /* 0x0068 */
1938 #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1
1939 #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2
1940 #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3
1941
1942 u32 nvm_retain_bitmap_addr; /* 0x0070 */
1943
1944 u32 reserved1; /* 0x0074 */
1945
1946 u32 reserved2[E2_FUNC_MAX];
1947
1948 u32 reserved3[E2_FUNC_MAX];/* 0x0088 */
1949 u32 reserved4[E2_FUNC_MAX];/* 0x0098 */
1950
1951 u32 swim_base_addr; /* 0x0108 */
1952 u32 swim_funcs;
1953 u32 swim_main_cb;
1954
1955 u32 reserved5[2];
1956
1957 /* generic flags controlled by the driver */
1958 u32 drv_flags;
1959 #define DRV_FLAGS_DCB_CONFIGURED 0x1
1960
1961 /* pointer to extended dev_info shared data copied from nvm image */
1962 u32 extended_dev_info_shared_addr;
1963 u32 ncsi_oem_data_addr;
1964
1965 u32 ocsd_host_addr;
1966 u32 ocbb_host_addr;
1967 u32 ocsd_req_update_interval;
1968};
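/*
 * Illustrative sketch, not part of the HSI: one way a driver could honour
 * the mf_cfg_addr backward-compatibility rule documented above. SHMEM2_RD()
 * is the driver's shmem2 read accessor; the surrounding logic is a
 * simplified, hypothetical rendition.
 *
 *	if (SHMEM2_RD(bp, size) >
 *	    offsetof(struct shmem2_region, mf_cfg_addr))
 *		mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
 *	else
 *		mf_cfg_base = shmem_base + sizeof(struct shmem_region);
 */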
1969
1970
1971struct emac_stats {
1972 u32 rx_stat_ifhcinoctets;
1973 u32 rx_stat_ifhcinbadoctets;
1974 u32 rx_stat_etherstatsfragments;
1975 u32 rx_stat_ifhcinucastpkts;
1976 u32 rx_stat_ifhcinmulticastpkts;
1977 u32 rx_stat_ifhcinbroadcastpkts;
1978 u32 rx_stat_dot3statsfcserrors;
1979 u32 rx_stat_dot3statsalignmenterrors;
1980 u32 rx_stat_dot3statscarriersenseerrors;
1981 u32 rx_stat_xonpauseframesreceived;
1982 u32 rx_stat_xoffpauseframesreceived;
1983 u32 rx_stat_maccontrolframesreceived;
1984 u32 rx_stat_xoffstateentered;
1985 u32 rx_stat_dot3statsframestoolong;
1986 u32 rx_stat_etherstatsjabbers;
1987 u32 rx_stat_etherstatsundersizepkts;
1988 u32 rx_stat_etherstatspkts64octets;
1989 u32 rx_stat_etherstatspkts65octetsto127octets;
1990 u32 rx_stat_etherstatspkts128octetsto255octets;
1991 u32 rx_stat_etherstatspkts256octetsto511octets;
1992 u32 rx_stat_etherstatspkts512octetsto1023octets;
1993 u32 rx_stat_etherstatspkts1024octetsto1522octets;
1994 u32 rx_stat_etherstatspktsover1522octets;
1995
1996 u32 rx_stat_falsecarriererrors;
1997
1998 u32 tx_stat_ifhcoutoctets;
1999 u32 tx_stat_ifhcoutbadoctets;
2000 u32 tx_stat_etherstatscollisions;
2001 u32 tx_stat_outxonsent;
2002 u32 tx_stat_outxoffsent;
2003 u32 tx_stat_flowcontroldone;
2004 u32 tx_stat_dot3statssinglecollisionframes;
2005 u32 tx_stat_dot3statsmultiplecollisionframes;
2006 u32 tx_stat_dot3statsdeferredtransmissions;
2007 u32 tx_stat_dot3statsexcessivecollisions;
2008 u32 tx_stat_dot3statslatecollisions;
2009 u32 tx_stat_ifhcoutucastpkts;
2010 u32 tx_stat_ifhcoutmulticastpkts;
2011 u32 tx_stat_ifhcoutbroadcastpkts;
2012 u32 tx_stat_etherstatspkts64octets;
2013 u32 tx_stat_etherstatspkts65octetsto127octets;
2014 u32 tx_stat_etherstatspkts128octetsto255octets;
2015 u32 tx_stat_etherstatspkts256octetsto511octets;
2016 u32 tx_stat_etherstatspkts512octetsto1023octets;
2017 u32 tx_stat_etherstatspkts1024octetsto1522octets;
2018 u32 tx_stat_etherstatspktsover1522octets;
2019 u32 tx_stat_dot3statsinternalmactransmiterrors;
2020};
2021
2022
2023struct bmac1_stats {
2024 u32 tx_stat_gtpkt_lo;
2025 u32 tx_stat_gtpkt_hi;
2026 u32 tx_stat_gtxpf_lo;
2027 u32 tx_stat_gtxpf_hi;
2028 u32 tx_stat_gtfcs_lo;
2029 u32 tx_stat_gtfcs_hi;
2030 u32 tx_stat_gtmca_lo;
2031 u32 tx_stat_gtmca_hi;
2032 u32 tx_stat_gtbca_lo;
2033 u32 tx_stat_gtbca_hi;
2034 u32 tx_stat_gtfrg_lo;
2035 u32 tx_stat_gtfrg_hi;
2036 u32 tx_stat_gtovr_lo;
2037 u32 tx_stat_gtovr_hi;
2038 u32 tx_stat_gt64_lo;
2039 u32 tx_stat_gt64_hi;
2040 u32 tx_stat_gt127_lo;
2041 u32 tx_stat_gt127_hi;
2042 u32 tx_stat_gt255_lo;
2043 u32 tx_stat_gt255_hi;
2044 u32 tx_stat_gt511_lo;
2045 u32 tx_stat_gt511_hi;
2046 u32 tx_stat_gt1023_lo;
2047 u32 tx_stat_gt1023_hi;
2048 u32 tx_stat_gt1518_lo;
2049 u32 tx_stat_gt1518_hi;
2050 u32 tx_stat_gt2047_lo;
2051 u32 tx_stat_gt2047_hi;
2052 u32 tx_stat_gt4095_lo;
2053 u32 tx_stat_gt4095_hi;
2054 u32 tx_stat_gt9216_lo;
2055 u32 tx_stat_gt9216_hi;
2056 u32 tx_stat_gt16383_lo;
2057 u32 tx_stat_gt16383_hi;
2058 u32 tx_stat_gtmax_lo;
2059 u32 tx_stat_gtmax_hi;
2060 u32 tx_stat_gtufl_lo;
2061 u32 tx_stat_gtufl_hi;
2062 u32 tx_stat_gterr_lo;
2063 u32 tx_stat_gterr_hi;
2064 u32 tx_stat_gtbyt_lo;
2065 u32 tx_stat_gtbyt_hi;
2066
2067 u32 rx_stat_gr64_lo;
2068 u32 rx_stat_gr64_hi;
2069 u32 rx_stat_gr127_lo;
2070 u32 rx_stat_gr127_hi;
2071 u32 rx_stat_gr255_lo;
2072 u32 rx_stat_gr255_hi;
2073 u32 rx_stat_gr511_lo;
2074 u32 rx_stat_gr511_hi;
2075 u32 rx_stat_gr1023_lo;
2076 u32 rx_stat_gr1023_hi;
2077 u32 rx_stat_gr1518_lo;
2078 u32 rx_stat_gr1518_hi;
2079 u32 rx_stat_gr2047_lo;
2080 u32 rx_stat_gr2047_hi;
2081 u32 rx_stat_gr4095_lo;
2082 u32 rx_stat_gr4095_hi;
2083 u32 rx_stat_gr9216_lo;
2084 u32 rx_stat_gr9216_hi;
2085 u32 rx_stat_gr16383_lo;
2086 u32 rx_stat_gr16383_hi;
2087 u32 rx_stat_grmax_lo;
2088 u32 rx_stat_grmax_hi;
2089 u32 rx_stat_grpkt_lo;
2090 u32 rx_stat_grpkt_hi;
2091 u32 rx_stat_grfcs_lo;
2092 u32 rx_stat_grfcs_hi;
2093 u32 rx_stat_grmca_lo;
2094 u32 rx_stat_grmca_hi;
2095 u32 rx_stat_grbca_lo;
2096 u32 rx_stat_grbca_hi;
2097 u32 rx_stat_grxcf_lo;
2098 u32 rx_stat_grxcf_hi;
2099 u32 rx_stat_grxpf_lo;
2100 u32 rx_stat_grxpf_hi;
2101 u32 rx_stat_grxuo_lo;
2102 u32 rx_stat_grxuo_hi;
2103 u32 rx_stat_grjbr_lo;
2104 u32 rx_stat_grjbr_hi;
2105 u32 rx_stat_grovr_lo;
2106 u32 rx_stat_grovr_hi;
2107 u32 rx_stat_grflr_lo;
2108 u32 rx_stat_grflr_hi;
2109 u32 rx_stat_grmeg_lo;
2110 u32 rx_stat_grmeg_hi;
2111 u32 rx_stat_grmeb_lo;
2112 u32 rx_stat_grmeb_hi;
2113 u32 rx_stat_grbyt_lo;
2114 u32 rx_stat_grbyt_hi;
2115 u32 rx_stat_grund_lo;
2116 u32 rx_stat_grund_hi;
2117 u32 rx_stat_grfrg_lo;
2118 u32 rx_stat_grfrg_hi;
2119 u32 rx_stat_grerb_lo;
2120 u32 rx_stat_grerb_hi;
2121 u32 rx_stat_grfre_lo;
2122 u32 rx_stat_grfre_hi;
2123 u32 rx_stat_gripj_lo;
2124 u32 rx_stat_gripj_hi;
2125};
2126
2127struct bmac2_stats {
2128 u32 tx_stat_gtpk_lo; /* gtpok */
2129 u32 tx_stat_gtpk_hi; /* gtpok */
2130 u32 tx_stat_gtxpf_lo; /* gtpf */
2131 u32 tx_stat_gtxpf_hi; /* gtpf */
2132 u32 tx_stat_gtpp_lo; /* NEW BMAC2 */
2133 u32 tx_stat_gtpp_hi; /* NEW BMAC2 */
2134 u32 tx_stat_gtfcs_lo;
2135 u32 tx_stat_gtfcs_hi;
2136 u32 tx_stat_gtuca_lo; /* NEW BMAC2 */
2137 u32 tx_stat_gtuca_hi; /* NEW BMAC2 */
2138 u32 tx_stat_gtmca_lo;
2139 u32 tx_stat_gtmca_hi;
2140 u32 tx_stat_gtbca_lo;
2141 u32 tx_stat_gtbca_hi;
2142 u32 tx_stat_gtovr_lo;
2143 u32 tx_stat_gtovr_hi;
2144 u32 tx_stat_gtfrg_lo;
2145 u32 tx_stat_gtfrg_hi;
2146 u32 tx_stat_gtpkt1_lo; /* gtpkt */
2147 u32 tx_stat_gtpkt1_hi; /* gtpkt */
2148 u32 tx_stat_gt64_lo;
2149 u32 tx_stat_gt64_hi;
2150 u32 tx_stat_gt127_lo;
2151 u32 tx_stat_gt127_hi;
2152 u32 tx_stat_gt255_lo;
2153 u32 tx_stat_gt255_hi;
2154 u32 tx_stat_gt511_lo;
2155 u32 tx_stat_gt511_hi;
2156 u32 tx_stat_gt1023_lo;
2157 u32 tx_stat_gt1023_hi;
2158 u32 tx_stat_gt1518_lo;
2159 u32 tx_stat_gt1518_hi;
2160 u32 tx_stat_gt2047_lo;
2161 u32 tx_stat_gt2047_hi;
2162 u32 tx_stat_gt4095_lo;
2163 u32 tx_stat_gt4095_hi;
2164 u32 tx_stat_gt9216_lo;
2165 u32 tx_stat_gt9216_hi;
2166 u32 tx_stat_gt16383_lo;
2167 u32 tx_stat_gt16383_hi;
2168 u32 tx_stat_gtmax_lo;
2169 u32 tx_stat_gtmax_hi;
2170 u32 tx_stat_gtufl_lo;
2171 u32 tx_stat_gtufl_hi;
2172 u32 tx_stat_gterr_lo;
2173 u32 tx_stat_gterr_hi;
2174 u32 tx_stat_gtbyt_lo;
2175 u32 tx_stat_gtbyt_hi;
2176
2177 u32 rx_stat_gr64_lo;
2178 u32 rx_stat_gr64_hi;
2179 u32 rx_stat_gr127_lo;
2180 u32 rx_stat_gr127_hi;
2181 u32 rx_stat_gr255_lo;
2182 u32 rx_stat_gr255_hi;
2183 u32 rx_stat_gr511_lo;
2184 u32 rx_stat_gr511_hi;
2185 u32 rx_stat_gr1023_lo;
2186 u32 rx_stat_gr1023_hi;
2187 u32 rx_stat_gr1518_lo;
2188 u32 rx_stat_gr1518_hi;
2189 u32 rx_stat_gr2047_lo;
2190 u32 rx_stat_gr2047_hi;
2191 u32 rx_stat_gr4095_lo;
2192 u32 rx_stat_gr4095_hi;
2193 u32 rx_stat_gr9216_lo;
2194 u32 rx_stat_gr9216_hi;
2195 u32 rx_stat_gr16383_lo;
2196 u32 rx_stat_gr16383_hi;
2197 u32 rx_stat_grmax_lo;
2198 u32 rx_stat_grmax_hi;
2199 u32 rx_stat_grpkt_lo;
2200 u32 rx_stat_grpkt_hi;
2201 u32 rx_stat_grfcs_lo;
2202 u32 rx_stat_grfcs_hi;
2203 u32 rx_stat_gruca_lo;
2204 u32 rx_stat_gruca_hi;
2205 u32 rx_stat_grmca_lo;
2206 u32 rx_stat_grmca_hi;
2207 u32 rx_stat_grbca_lo;
2208 u32 rx_stat_grbca_hi;
2209 u32 rx_stat_grxpf_lo; /* grpf */
2210 u32 rx_stat_grxpf_hi; /* grpf */
2211 u32 rx_stat_grpp_lo;
2212 u32 rx_stat_grpp_hi;
2213 u32 rx_stat_grxuo_lo; /* gruo */
2214 u32 rx_stat_grxuo_hi; /* gruo */
2215 u32 rx_stat_grjbr_lo;
2216 u32 rx_stat_grjbr_hi;
2217 u32 rx_stat_grovr_lo;
2218 u32 rx_stat_grovr_hi;
2219 u32 rx_stat_grxcf_lo; /* grcf */
2220 u32 rx_stat_grxcf_hi; /* grcf */
2221 u32 rx_stat_grflr_lo;
2222 u32 rx_stat_grflr_hi;
2223 u32 rx_stat_grpok_lo;
2224 u32 rx_stat_grpok_hi;
2225 u32 rx_stat_grmeg_lo;
2226 u32 rx_stat_grmeg_hi;
2227 u32 rx_stat_grmeb_lo;
2228 u32 rx_stat_grmeb_hi;
2229 u32 rx_stat_grbyt_lo;
2230 u32 rx_stat_grbyt_hi;
2231 u32 rx_stat_grund_lo;
2232 u32 rx_stat_grund_hi;
2233 u32 rx_stat_grfrg_lo;
2234 u32 rx_stat_grfrg_hi;
2235 u32 rx_stat_grerb_lo; /* grerrbyt */
2236 u32 rx_stat_grerb_hi; /* grerrbyt */
2237 u32 rx_stat_grfre_lo; /* grfrerr */
2238 u32 rx_stat_grfre_hi; /* grfrerr */
2239 u32 rx_stat_gripj_lo;
2240 u32 rx_stat_gripj_hi;
2241};
2242
2243struct mstat_stats {
2244 struct {
2245 /* NOTE: MSTAT on E3 has a bug where this register's contents are
2246 * actually tx_gtxpok + tx_gtxpf + (possibly)tx_gtxpp
2247 */
2248 u32 tx_gtxpok_lo;
2249 u32 tx_gtxpok_hi;
2250 u32 tx_gtxpf_lo;
2251 u32 tx_gtxpf_hi;
2252 u32 tx_gtxpp_lo;
2253 u32 tx_gtxpp_hi;
2254 u32 tx_gtfcs_lo;
2255 u32 tx_gtfcs_hi;
2256 u32 tx_gtuca_lo;
2257 u32 tx_gtuca_hi;
2258 u32 tx_gtmca_lo;
2259 u32 tx_gtmca_hi;
2260 u32 tx_gtgca_lo;
2261 u32 tx_gtgca_hi;
2262 u32 tx_gtpkt_lo;
2263 u32 tx_gtpkt_hi;
2264 u32 tx_gt64_lo;
2265 u32 tx_gt64_hi;
2266 u32 tx_gt127_lo;
2267 u32 tx_gt127_hi;
2268 u32 tx_gt255_lo;
2269 u32 tx_gt255_hi;
2270 u32 tx_gt511_lo;
2271 u32 tx_gt511_hi;
2272 u32 tx_gt1023_lo;
2273 u32 tx_gt1023_hi;
2274 u32 tx_gt1518_lo;
2275 u32 tx_gt1518_hi;
2276 u32 tx_gt2047_lo;
2277 u32 tx_gt2047_hi;
2278 u32 tx_gt4095_lo;
2279 u32 tx_gt4095_hi;
2280 u32 tx_gt9216_lo;
2281 u32 tx_gt9216_hi;
2282 u32 tx_gt16383_lo;
2283 u32 tx_gt16383_hi;
2284 u32 tx_gtufl_lo;
2285 u32 tx_gtufl_hi;
2286 u32 tx_gterr_lo;
2287 u32 tx_gterr_hi;
2288 u32 tx_gtbyt_lo;
2289 u32 tx_gtbyt_hi;
2290 u32 tx_collisions_lo;
2291 u32 tx_collisions_hi;
2292 u32 tx_singlecollision_lo;
2293 u32 tx_singlecollision_hi;
2294 u32 tx_multiplecollisions_lo;
2295 u32 tx_multiplecollisions_hi;
2296 u32 tx_deferred_lo;
2297 u32 tx_deferred_hi;
2298 u32 tx_excessivecollisions_lo;
2299 u32 tx_excessivecollisions_hi;
2300 u32 tx_latecollisions_lo;
2301 u32 tx_latecollisions_hi;
2302 } stats_tx;
2303
2304 struct {
2305 u32 rx_gr64_lo;
2306 u32 rx_gr64_hi;
2307 u32 rx_gr127_lo;
2308 u32 rx_gr127_hi;
2309 u32 rx_gr255_lo;
2310 u32 rx_gr255_hi;
2311 u32 rx_gr511_lo;
2312 u32 rx_gr511_hi;
2313 u32 rx_gr1023_lo;
2314 u32 rx_gr1023_hi;
2315 u32 rx_gr1518_lo;
2316 u32 rx_gr1518_hi;
2317 u32 rx_gr2047_lo;
2318 u32 rx_gr2047_hi;
2319 u32 rx_gr4095_lo;
2320 u32 rx_gr4095_hi;
2321 u32 rx_gr9216_lo;
2322 u32 rx_gr9216_hi;
2323 u32 rx_gr16383_lo;
2324 u32 rx_gr16383_hi;
2325 u32 rx_grpkt_lo;
2326 u32 rx_grpkt_hi;
2327 u32 rx_grfcs_lo;
2328 u32 rx_grfcs_hi;
2329 u32 rx_gruca_lo;
2330 u32 rx_gruca_hi;
2331 u32 rx_grmca_lo;
2332 u32 rx_grmca_hi;
2333 u32 rx_grbca_lo;
2334 u32 rx_grbca_hi;
2335 u32 rx_grxpf_lo;
2336 u32 rx_grxpf_hi;
2337 u32 rx_grxpp_lo;
2338 u32 rx_grxpp_hi;
2339 u32 rx_grxuo_lo;
2340 u32 rx_grxuo_hi;
2341 u32 rx_grovr_lo;
2342 u32 rx_grovr_hi;
2343 u32 rx_grxcf_lo;
2344 u32 rx_grxcf_hi;
2345 u32 rx_grflr_lo;
2346 u32 rx_grflr_hi;
2347 u32 rx_grpok_lo;
2348 u32 rx_grpok_hi;
2349 u32 rx_grbyt_lo;
2350 u32 rx_grbyt_hi;
2351 u32 rx_grund_lo;
2352 u32 rx_grund_hi;
2353 u32 rx_grfrg_lo;
2354 u32 rx_grfrg_hi;
2355 u32 rx_grerb_lo;
2356 u32 rx_grerb_hi;
2357 u32 rx_grfre_lo;
2358 u32 rx_grfre_hi;
2359
2360 u32 rx_alignmenterrors_lo;
2361 u32 rx_alignmenterrors_hi;
2362 u32 rx_falsecarrier_lo;
2363 u32 rx_falsecarrier_hi;
2364 u32 rx_llfcmsgcnt_lo;
2365 u32 rx_llfcmsgcnt_hi;
2366 } stats_rx;
2367};
2368
2369union mac_stats {
2370 struct emac_stats emac_stats;
2371 struct bmac1_stats bmac1_stats;
2372 struct bmac2_stats bmac2_stats;
2373 struct mstat_stats mstat_stats;
2374};
2375
2376
2377struct mac_stx {
2378 /* in_bad_octets */
2379 u32 rx_stat_ifhcinbadoctets_hi;
2380 u32 rx_stat_ifhcinbadoctets_lo;
2381
2382 /* out_bad_octets */
2383 u32 tx_stat_ifhcoutbadoctets_hi;
2384 u32 tx_stat_ifhcoutbadoctets_lo;
2385
2386 /* crc_receive_errors */
2387 u32 rx_stat_dot3statsfcserrors_hi;
2388 u32 rx_stat_dot3statsfcserrors_lo;
2389 /* alignment_errors */
2390 u32 rx_stat_dot3statsalignmenterrors_hi;
2391 u32 rx_stat_dot3statsalignmenterrors_lo;
2392 /* carrier_sense_errors */
2393 u32 rx_stat_dot3statscarriersenseerrors_hi;
2394 u32 rx_stat_dot3statscarriersenseerrors_lo;
2395 /* false_carrier_detections */
2396 u32 rx_stat_falsecarriererrors_hi;
2397 u32 rx_stat_falsecarriererrors_lo;
2398
2399 /* runt_packets_received */
2400 u32 rx_stat_etherstatsundersizepkts_hi;
2401 u32 rx_stat_etherstatsundersizepkts_lo;
2402 /* jabber_packets_received */
2403 u32 rx_stat_dot3statsframestoolong_hi;
2404 u32 rx_stat_dot3statsframestoolong_lo;
2405
2406 /* error_runt_packets_received */
2407 u32 rx_stat_etherstatsfragments_hi;
2408 u32 rx_stat_etherstatsfragments_lo;
2409 /* error_jabber_packets_received */
2410 u32 rx_stat_etherstatsjabbers_hi;
2411 u32 rx_stat_etherstatsjabbers_lo;
2412
2413 /* control_frames_received */
2414 u32 rx_stat_maccontrolframesreceived_hi;
2415 u32 rx_stat_maccontrolframesreceived_lo;
2416 u32 rx_stat_mac_xpf_hi;
2417 u32 rx_stat_mac_xpf_lo;
2418 u32 rx_stat_mac_xcf_hi;
2419 u32 rx_stat_mac_xcf_lo;
2420
2421 /* xoff_state_entered */
2422 u32 rx_stat_xoffstateentered_hi;
2423 u32 rx_stat_xoffstateentered_lo;
2424 /* pause_xon_frames_received */
2425 u32 rx_stat_xonpauseframesreceived_hi;
2426 u32 rx_stat_xonpauseframesreceived_lo;
2427 /* pause_xoff_frames_received */
2428 u32 rx_stat_xoffpauseframesreceived_hi;
2429 u32 rx_stat_xoffpauseframesreceived_lo;
2430 /* pause_xon_frames_transmitted */
2431 u32 tx_stat_outxonsent_hi;
2432 u32 tx_stat_outxonsent_lo;
2433 /* pause_xoff_frames_transmitted */
2434 u32 tx_stat_outxoffsent_hi;
2435 u32 tx_stat_outxoffsent_lo;
2436 /* flow_control_done */
2437 u32 tx_stat_flowcontroldone_hi;
2438 u32 tx_stat_flowcontroldone_lo;
2439
2440 /* ether_stats_collisions */
2441 u32 tx_stat_etherstatscollisions_hi;
2442 u32 tx_stat_etherstatscollisions_lo;
2443 /* single_collision_transmit_frames */
2444 u32 tx_stat_dot3statssinglecollisionframes_hi;
2445 u32 tx_stat_dot3statssinglecollisionframes_lo;
2446 /* multiple_collision_transmit_frames */
2447 u32 tx_stat_dot3statsmultiplecollisionframes_hi;
2448 u32 tx_stat_dot3statsmultiplecollisionframes_lo;
2449 /* deferred_transmissions */
2450 u32 tx_stat_dot3statsdeferredtransmissions_hi;
2451 u32 tx_stat_dot3statsdeferredtransmissions_lo;
2452 /* excessive_collision_frames */
2453 u32 tx_stat_dot3statsexcessivecollisions_hi;
2454 u32 tx_stat_dot3statsexcessivecollisions_lo;
2455 /* late_collision_frames */
2456 u32 tx_stat_dot3statslatecollisions_hi;
2457 u32 tx_stat_dot3statslatecollisions_lo;
2458
2459 /* frames_transmitted_64_bytes */
2460 u32 tx_stat_etherstatspkts64octets_hi;
2461 u32 tx_stat_etherstatspkts64octets_lo;
2462 /* frames_transmitted_65_127_bytes */
2463 u32 tx_stat_etherstatspkts65octetsto127octets_hi;
2464 u32 tx_stat_etherstatspkts65octetsto127octets_lo;
2465 /* frames_transmitted_128_255_bytes */
2466 u32 tx_stat_etherstatspkts128octetsto255octets_hi;
2467 u32 tx_stat_etherstatspkts128octetsto255octets_lo;
2468 /* frames_transmitted_256_511_bytes */
2469 u32 tx_stat_etherstatspkts256octetsto511octets_hi;
2470 u32 tx_stat_etherstatspkts256octetsto511octets_lo;
2471 /* frames_transmitted_512_1023_bytes */
2472 u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
2473 u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
2474 /* frames_transmitted_1024_1522_bytes */
2475 u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
2476 u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
2477 /* frames_transmitted_1523_9022_bytes */
2478 u32 tx_stat_etherstatspktsover1522octets_hi;
2479 u32 tx_stat_etherstatspktsover1522octets_lo;
2480 u32 tx_stat_mac_2047_hi;
2481 u32 tx_stat_mac_2047_lo;
2482 u32 tx_stat_mac_4095_hi;
2483 u32 tx_stat_mac_4095_lo;
2484 u32 tx_stat_mac_9216_hi;
2485 u32 tx_stat_mac_9216_lo;
2486 u32 tx_stat_mac_16383_hi;
2487 u32 tx_stat_mac_16383_lo;
2488
2489 /* internal_mac_transmit_errors */
2490 u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
2491 u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
2492
2493 /* if_out_discards */
2494 u32 tx_stat_mac_ufl_hi;
2495 u32 tx_stat_mac_ufl_lo;
2496};
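/*
 * Illustrative sketch, not part of the HSI: every statistic in mac_stx is a
 * 64-bit counter split into _hi/_lo u32 halves. A helper along these lines
 * (name hypothetical) folds such a pair back into a single u64.
 */
static inline u64 bnx2x_example_hilo_u64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}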
2497
2498
2499#define MAC_STX_IDX_MAX 2
2500
2501struct host_port_stats {
2502 u32 host_port_stats_start;
2503
2504 struct mac_stx mac_stx[MAC_STX_IDX_MAX];
2505
2506 u32 brb_drop_hi;
2507 u32 brb_drop_lo;
2508
2509 u32 host_port_stats_end;
2510};
2511
2512
2513struct host_func_stats {
2514 u32 host_func_stats_start;
2515
2516 u32 total_bytes_received_hi;
2517 u32 total_bytes_received_lo;
2518
2519 u32 total_bytes_transmitted_hi;
2520 u32 total_bytes_transmitted_lo;
2521
2522 u32 total_unicast_packets_received_hi;
2523 u32 total_unicast_packets_received_lo;
2524
2525 u32 total_multicast_packets_received_hi;
2526 u32 total_multicast_packets_received_lo;
2527
2528 u32 total_broadcast_packets_received_hi;
2529 u32 total_broadcast_packets_received_lo;
2530
2531 u32 total_unicast_packets_transmitted_hi;
2532 u32 total_unicast_packets_transmitted_lo;
2533
2534 u32 total_multicast_packets_transmitted_hi;
2535 u32 total_multicast_packets_transmitted_lo;
2536
2537 u32 total_broadcast_packets_transmitted_hi;
2538 u32 total_broadcast_packets_transmitted_lo;
2539
2540 u32 valid_bytes_received_hi;
2541 u32 valid_bytes_received_lo;
2542
2543 u32 host_func_stats_end;
2544};
2545
2546/* VIC definitions */
2547#define VICSTATST_UIF_INDEX 2
2548
2549#define BCM_5710_FW_MAJOR_VERSION 7
2550#define BCM_5710_FW_MINOR_VERSION 0
2551#define BCM_5710_FW_REVISION_VERSION 23
2552#define BCM_5710_FW_ENGINEERING_VERSION 0
2553#define BCM_5710_FW_COMPILE_FLAGS 1
2554
2555
2556/*
2557 * attention bits
2558 */
2559struct atten_sp_status_block {
2560 __le32 attn_bits;
2561 __le32 attn_bits_ack;
2562 u8 status_block_id;
2563 u8 reserved0;
2564 __le16 attn_bits_index;
2565 __le32 reserved1;
2566};
2567
2568
2569/*
2570 * The eth aggregative context of Cstorm
2571 */
2572struct cstorm_eth_ag_context {
2573 u32 __reserved0[10];
2574};
2575
2576
2577/*
2578 * dmae command structure
2579 */
2580struct dmae_command {
2581 u32 opcode;
2582#define DMAE_COMMAND_SRC (0x1<<0)
2583#define DMAE_COMMAND_SRC_SHIFT 0
2584#define DMAE_COMMAND_DST (0x3<<1)
2585#define DMAE_COMMAND_DST_SHIFT 1
2586#define DMAE_COMMAND_C_DST (0x1<<3)
2587#define DMAE_COMMAND_C_DST_SHIFT 3
2588#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4)
2589#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4
2590#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5)
2591#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5
2592#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6)
2593#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6
2594#define DMAE_COMMAND_ENDIANITY (0x3<<9)
2595#define DMAE_COMMAND_ENDIANITY_SHIFT 9
2596#define DMAE_COMMAND_PORT (0x1<<11)
2597#define DMAE_COMMAND_PORT_SHIFT 11
2598#define DMAE_COMMAND_CRC_RESET (0x1<<12)
2599#define DMAE_COMMAND_CRC_RESET_SHIFT 12
2600#define DMAE_COMMAND_SRC_RESET (0x1<<13)
2601#define DMAE_COMMAND_SRC_RESET_SHIFT 13
2602#define DMAE_COMMAND_DST_RESET (0x1<<14)
2603#define DMAE_COMMAND_DST_RESET_SHIFT 14
2604#define DMAE_COMMAND_E1HVN (0x3<<15)
2605#define DMAE_COMMAND_E1HVN_SHIFT 15
2606#define DMAE_COMMAND_DST_VN (0x3<<17)
2607#define DMAE_COMMAND_DST_VN_SHIFT 17
2608#define DMAE_COMMAND_C_FUNC (0x1<<19)
2609#define DMAE_COMMAND_C_FUNC_SHIFT 19
2610#define DMAE_COMMAND_ERR_POLICY (0x3<<20)
2611#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
2612#define DMAE_COMMAND_RESERVED0 (0x3FF<<22)
2613#define DMAE_COMMAND_RESERVED0_SHIFT 22
2614 u32 src_addr_lo;
2615 u32 src_addr_hi;
2616 u32 dst_addr_lo;
2617 u32 dst_addr_hi;
2618#if defined(__BIG_ENDIAN)
2619 u16 opcode_iov;
2620#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
2621#define DMAE_COMMAND_SRC_VFID_SHIFT 0
2622#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
2623#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
2624#define DMAE_COMMAND_RESERVED1 (0x1<<7)
2625#define DMAE_COMMAND_RESERVED1_SHIFT 7
2626#define DMAE_COMMAND_DST_VFID (0x3F<<8)
2627#define DMAE_COMMAND_DST_VFID_SHIFT 8
2628#define DMAE_COMMAND_DST_VFPF (0x1<<14)
2629#define DMAE_COMMAND_DST_VFPF_SHIFT 14
2630#define DMAE_COMMAND_RESERVED2 (0x1<<15)
2631#define DMAE_COMMAND_RESERVED2_SHIFT 15
2632 u16 len;
2633#elif defined(__LITTLE_ENDIAN)
2634 u16 len;
2635 u16 opcode_iov;
2636#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
2637#define DMAE_COMMAND_SRC_VFID_SHIFT 0
2638#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
2639#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
2640#define DMAE_COMMAND_RESERVED1 (0x1<<7)
2641#define DMAE_COMMAND_RESERVED1_SHIFT 7
2642#define DMAE_COMMAND_DST_VFID (0x3F<<8)
2643#define DMAE_COMMAND_DST_VFID_SHIFT 8
2644#define DMAE_COMMAND_DST_VFPF (0x1<<14)
2645#define DMAE_COMMAND_DST_VFPF_SHIFT 14
2646#define DMAE_COMMAND_RESERVED2 (0x1<<15)
2647#define DMAE_COMMAND_RESERVED2_SHIFT 15
2648#endif
2649 u32 comp_addr_lo;
2650 u32 comp_addr_hi;
2651 u32 comp_val;
2652 u32 crc32;
2653 u32 crc32_c;
2654#if defined(__BIG_ENDIAN)
2655 u16 crc16_c;
2656 u16 crc16;
2657#elif defined(__LITTLE_ENDIAN)
2658 u16 crc16;
2659 u16 crc16_c;
2660#endif
2661#if defined(__BIG_ENDIAN)
2662 u16 reserved3;
2663 u16 crc_t10;
2664#elif defined(__LITTLE_ENDIAN)
2665 u16 crc_t10;
2666 u16 reserved3;
2667#endif
2668#if defined(__BIG_ENDIAN)
2669 u16 xsum8;
2670 u16 xsum16;
2671#elif defined(__LITTLE_ENDIAN)
2672 u16 xsum16;
2673 u16 xsum8;
2674#endif
2675};
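/*
 * Illustrative sketch, not part of the HSI: each field of the DMAE opcode
 * above comes as a MASK/SHIFT macro pair, so an opcode is composed by
 * shifting a value into place and masking it. The helper name and the
 * choice of fields are arbitrary examples.
 */
static inline u32 bnx2x_example_dmae_opcode(u8 src, u8 dst, u8 port)
{
	u32 opcode = 0;

	opcode |= (src << DMAE_COMMAND_SRC_SHIFT) & DMAE_COMMAND_SRC;
	opcode |= (dst << DMAE_COMMAND_DST_SHIFT) & DMAE_COMMAND_DST;
	opcode |= (port << DMAE_COMMAND_PORT_SHIFT) & DMAE_COMMAND_PORT;

	return opcode;
}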
2676
2677
2678/*
2679 * common data for all protocols
2680 */
2681struct doorbell_hdr {
2682 u8 header;
2683#define DOORBELL_HDR_RX (0x1<<0)
2684#define DOORBELL_HDR_RX_SHIFT 0
2685#define DOORBELL_HDR_DB_TYPE (0x1<<1)
2686#define DOORBELL_HDR_DB_TYPE_SHIFT 1
2687#define DOORBELL_HDR_DPM_SIZE (0x3<<2)
2688#define DOORBELL_HDR_DPM_SIZE_SHIFT 2
2689#define DOORBELL_HDR_CONN_TYPE (0xF<<4)
2690#define DOORBELL_HDR_CONN_TYPE_SHIFT 4
2691};
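/*
 * Illustrative sketch, not part of the HSI: composing the doorbell header
 * byte from its sub-fields using the MASK/SHIFT pairs above. The helper
 * name and argument values are hypothetical.
 */
static inline u8 bnx2x_example_db_hdr(u8 rx, u8 db_type, u8 conn_type)
{
	return ((rx << DOORBELL_HDR_RX_SHIFT) & DOORBELL_HDR_RX) |
	       ((db_type << DOORBELL_HDR_DB_TYPE_SHIFT) &
		DOORBELL_HDR_DB_TYPE) |
	       ((conn_type << DOORBELL_HDR_CONN_TYPE_SHIFT) &
		DOORBELL_HDR_CONN_TYPE);
}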
2692
2693/*
2694 * Ethernet doorbell
2695 */
2696struct eth_tx_doorbell {
2697#if defined(__BIG_ENDIAN)
2698 u16 npackets;
2699 u8 params;
2700#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
2701#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
2702#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
2703#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
2704#define ETH_TX_DOORBELL_SPARE (0x1<<7)
2705#define ETH_TX_DOORBELL_SPARE_SHIFT 7
2706 struct doorbell_hdr hdr;
2707#elif defined(__LITTLE_ENDIAN)
2708 struct doorbell_hdr hdr;
2709 u8 params;
2710#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
2711#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
2712#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
2713#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
2714#define ETH_TX_DOORBELL_SPARE (0x1<<7)
2715#define ETH_TX_DOORBELL_SPARE_SHIFT 7
2716 u16 npackets;
2717#endif
2718};
2719
2720
2721/*
2722 * 3 lines. status block
2723 */
2724struct hc_status_block_e1x {
2725 __le16 index_values[HC_SB_MAX_INDICES_E1X];
2726 __le16 running_index[HC_SB_MAX_SM];
2727 __le32 rsrv[11];
2728};
2729
2730/*
2731 * host status block
2732 */
2733struct host_hc_status_block_e1x {
2734 struct hc_status_block_e1x sb;
2735};
2736
2737
2738/*
2739 * 3 lines. status block
2740 */
2741struct hc_status_block_e2 {
2742 __le16 index_values[HC_SB_MAX_INDICES_E2];
2743 __le16 running_index[HC_SB_MAX_SM];
2744 __le32 reserved[11];
2745};
2746
2747/*
2748 * host status block
2749 */
2750struct host_hc_status_block_e2 {
2751 struct hc_status_block_e2 sb;
2752};
2753
2754
2755/*
2756 * 5 lines. slow-path status block
2757 */
2758struct hc_sp_status_block {
2759 __le16 index_values[HC_SP_SB_MAX_INDICES];
2760 __le16 running_index;
2761 __le16 rsrv;
2762 u32 rsrv1;
2763};
2764
2765/*
2766 * host status block
2767 */
2768struct host_sp_status_block {
2769 struct atten_sp_status_block atten_status_block;
2770 struct hc_sp_status_block sp_sb;
2771};
2772
2773
2774/*
2775 * IGU driver acknowledgment register
2776 */
2777struct igu_ack_register {
2778#if defined(__BIG_ENDIAN)
2779 u16 sb_id_and_flags;
2780#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
2781#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
2782#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
2783#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
2784#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
2785#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
2786#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
2787#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
2788#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
2789#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
2790 u16 status_block_index;
2791#elif defined(__LITTLE_ENDIAN)
2792 u16 status_block_index;
2793 u16 sb_id_and_flags;
2794#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
2795#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
2796#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
2797#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
2798#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
2799#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
2800#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
2801#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
2802#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
2803#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
2804#endif
2805};
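/*
 * Illustrative sketch, not part of the HSI: building the sb_id_and_flags
 * word of the IGU ack register. The helper name is hypothetical; the real
 * driver has its own ack path.
 */
static inline u16 bnx2x_example_igu_ack(u8 sb_id, u8 storm_id, u8 update,
					u8 int_mode)
{
	return ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) &
		IGU_ACK_REGISTER_STATUS_BLOCK_ID) |
	       ((storm_id << IGU_ACK_REGISTER_STORM_ID_SHIFT) &
		IGU_ACK_REGISTER_STORM_ID) |
	       ((update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) &
		IGU_ACK_REGISTER_UPDATE_INDEX) |
	       ((int_mode << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT) &
		IGU_ACK_REGISTER_INTERRUPT_MODE);
}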
2806
2807
2808/*
2809 * IGU driver acknowledgement register
2810 */
2811struct igu_backward_compatible {
2812 u32 sb_id_and_flags;
2813#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0)
2814#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0
2815#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16)
2816#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16
2817#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21)
2818#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21
2819#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24)
2820#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24
2821#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25)
2822#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25
2823#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27)
2824#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27
2825 u32 reserved_2;
2826};
2827
2828
2829/*
2830 * IGU driver acknowledgement register
2831 */
2832struct igu_regular {
2833 u32 sb_id_and_flags;
2834#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0)
2835#define IGU_REGULAR_SB_INDEX_SHIFT 0
2836#define IGU_REGULAR_RESERVED0 (0x1<<20)
2837#define IGU_REGULAR_RESERVED0_SHIFT 20
2838#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21)
2839#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21
2840#define IGU_REGULAR_BUPDATE (0x1<<24)
2841#define IGU_REGULAR_BUPDATE_SHIFT 24
2842#define IGU_REGULAR_ENABLE_INT (0x3<<25)
2843#define IGU_REGULAR_ENABLE_INT_SHIFT 25
2844#define IGU_REGULAR_RESERVED_1 (0x1<<27)
2845#define IGU_REGULAR_RESERVED_1_SHIFT 27
2846#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28)
2847#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28
2848#define IGU_REGULAR_CLEANUP_SET (0x1<<30)
2849#define IGU_REGULAR_CLEANUP_SET_SHIFT 30
2850#define IGU_REGULAR_BCLEANUP (0x1<<31)
2851#define IGU_REGULAR_BCLEANUP_SHIFT 31
2852 u32 reserved_2;
2853};
2854
2855/*
2856 * IGU driver acknowledgement register
2857 */
2858union igu_consprod_reg {
2859 struct igu_regular regular;
2860 struct igu_backward_compatible backward_compatible;
2861};
2862
2863
2864/*
2865 * IGU control commands
2866 */
2867enum igu_ctrl_cmd {
2868 IGU_CTRL_CMD_TYPE_RD,
2869 IGU_CTRL_CMD_TYPE_WR,
2870 MAX_IGU_CTRL_CMD
2871};
2872
2873
2874/*
2875 * Control register for the IGU command register
2876 */
2877struct igu_ctrl_reg {
2878 u32 ctrl_data;
2879#define IGU_CTRL_REG_ADDRESS (0xFFF<<0)
2880#define IGU_CTRL_REG_ADDRESS_SHIFT 0
2881#define IGU_CTRL_REG_FID (0x7F<<12)
2882#define IGU_CTRL_REG_FID_SHIFT 12
2883#define IGU_CTRL_REG_RESERVED (0x1<<19)
2884#define IGU_CTRL_REG_RESERVED_SHIFT 19
2885#define IGU_CTRL_REG_TYPE (0x1<<20)
2886#define IGU_CTRL_REG_TYPE_SHIFT 20
2887#define IGU_CTRL_REG_UNUSED (0x7FF<<21)
2888#define IGU_CTRL_REG_UNUSED_SHIFT 21
2889};
2890
2891
2892/*
2893 * IGU interrupt command
2894 */
2895enum igu_int_cmd {
2896 IGU_INT_ENABLE,
2897 IGU_INT_DISABLE,
2898 IGU_INT_NOP,
2899 IGU_INT_NOP2,
2900 MAX_IGU_INT_CMD
2901};
2902
2903
2904/*
2905 * IGU segments
2906 */
2907enum igu_seg_access {
2908 IGU_SEG_ACCESS_NORM,
2909 IGU_SEG_ACCESS_DEF,
2910 IGU_SEG_ACCESS_ATTN,
2911 MAX_IGU_SEG_ACCESS
2912};
2913
2914
2915/*
2916 * Parser parsing flags field
2917 */
2918struct parsing_flags {
2919 __le16 flags;
2920#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0)
2921#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0
2922#define PARSING_FLAGS_VLAN (0x1<<1)
2923#define PARSING_FLAGS_VLAN_SHIFT 1
2924#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2)
2925#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2
2926#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3)
2927#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3
2928#define PARSING_FLAGS_IP_OPTIONS (0x1<<5)
2929#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5
2930#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1<<6)
2931#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6
2932#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3<<7)
2933#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7
2934#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1<<9)
2935#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9
2936#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1<<10)
2937#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10
2938#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1<<11)
2939#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11
2940#define PARSING_FLAGS_CONNECTION_MATCH (0x1<<12)
2941#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12
2942#define PARSING_FLAGS_LLC_SNAP (0x1<<13)
2943#define PARSING_FLAGS_LLC_SNAP_SHIFT 13
2944#define PARSING_FLAGS_RESERVED0 (0x3<<14)
2945#define PARSING_FLAGS_RESERVED0_SHIFT 14
2946};
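/*
 * Illustrative sketch, not part of the HSI: extracting the over-IP protocol
 * field from a CQE's parsing_flags; the result can be compared against
 * enum prs_flags_over_ip below (e.g. PRS_FLAG_OVERIP_TCP for a TCP
 * segment). The helper name is hypothetical.
 */
static inline u16 bnx2x_example_l4_proto(const struct parsing_flags *pf)
{
	return (le16_to_cpu(pf->flags) & PARSING_FLAGS_OVER_IP_PROTOCOL) >>
	       PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT;
}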
2947
2948
2949/*
2950 * Parsing flags for TCP ACK type
2951 */
2952enum prs_flags_ack_type {
2953 PRS_FLAG_PUREACK_PIGGY,
2954 PRS_FLAG_PUREACK_PURE,
2955 MAX_PRS_FLAGS_ACK_TYPE
2956};
2957
2958
2959/*
2960 * Parsing flags for Ethernet address type
2961 */
2962enum prs_flags_eth_addr_type {
2963 PRS_FLAG_ETHTYPE_NON_UNICAST,
2964 PRS_FLAG_ETHTYPE_UNICAST,
2965 MAX_PRS_FLAGS_ETH_ADDR_TYPE
2966};
2967
2968
2969/*
2970 * Parsing flags for over-ethernet protocol
2971 */
2972enum prs_flags_over_eth {
2973 PRS_FLAG_OVERETH_UNKNOWN,
2974 PRS_FLAG_OVERETH_IPV4,
2975 PRS_FLAG_OVERETH_IPV6,
2976 PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN,
2977 MAX_PRS_FLAGS_OVER_ETH
2978};
2979
2980
2981/*
2982 * Parsing flags for over-IP protocol
2983 */
2984enum prs_flags_over_ip {
2985 PRS_FLAG_OVERIP_UNKNOWN,
2986 PRS_FLAG_OVERIP_TCP,
2987 PRS_FLAG_OVERIP_UDP,
2988 MAX_PRS_FLAGS_OVER_IP
2989};
2990
2991
2992/*
2993 * SDM operation gen command (generate aggregative interrupt)
2994 */
2995struct sdm_op_gen {
2996 __le32 command;
2997#define SDM_OP_GEN_COMP_PARAM (0x1F<<0)
2998#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
2999#define SDM_OP_GEN_COMP_TYPE (0x7<<5)
3000#define SDM_OP_GEN_COMP_TYPE_SHIFT 5
3001#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8)
3002#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8
3003#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16)
3004#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16
3005#define SDM_OP_GEN_RESERVED (0x7FFF<<17)
3006#define SDM_OP_GEN_RESERVED_SHIFT 17
3007};
3008
3009
3010/*
3011 * Timers connection context
3012 */
3013struct timers_block_context {
3014 u32 __reserved_0;
3015 u32 __reserved_1;
3016 u32 __reserved_2;
3017 u32 flags;
3018#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
3019#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
3020#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
3021#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
3022#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
3023#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
3024};
3025
3026
3027/*
3028 * The eth aggregative context of Tstorm
3029 */
3030struct tstorm_eth_ag_context {
3031 u32 __reserved0[14];
3032};
3033
3034
3035/*
3036 * The eth aggregative context of Ustorm
3037 */
3038struct ustorm_eth_ag_context {
3039 u32 __reserved0;
3040#if defined(__BIG_ENDIAN)
3041 u8 cdu_usage;
3042 u8 __reserved2;
3043 u16 __reserved1;
3044#elif defined(__LITTLE_ENDIAN)
3045 u16 __reserved1;
3046 u8 __reserved2;
3047 u8 cdu_usage;
3048#endif
3049 u32 __reserved3[6];
3050};
3051
3052
3053/*
3054 * The eth aggregative context of Xstorm
3055 */
3056struct xstorm_eth_ag_context {
3057 u32 reserved0;
3058#if defined(__BIG_ENDIAN)
3059 u8 cdu_reserved;
3060 u8 reserved2;
3061 u16 reserved1;
3062#elif defined(__LITTLE_ENDIAN)
3063 u16 reserved1;
3064 u8 reserved2;
3065 u8 cdu_reserved;
3066#endif
3067 u32 reserved3[30];
3068};
3069
3070
3071/*
3072 * doorbell message sent to the chip
3073 */
3074struct doorbell {
3075#if defined(__BIG_ENDIAN)
3076 u16 zero_fill2;
3077 u8 zero_fill1;
3078 struct doorbell_hdr header;
3079#elif defined(__LITTLE_ENDIAN)
3080 struct doorbell_hdr header;
3081 u8 zero_fill1;
3082 u16 zero_fill2;
3083#endif
3084};
3085
3086
3087/*
3088 * doorbell message sent to the chip
3089 */
3090struct doorbell_set_prod {
3091#if defined(__BIG_ENDIAN)
3092 u16 prod;
3093 u8 zero_fill1;
3094 struct doorbell_hdr header;
3095#elif defined(__LITTLE_ENDIAN)
3096 struct doorbell_hdr header;
3097 u8 zero_fill1;
3098 u16 prod;
3099#endif
3100};
3101
3102
3103struct regpair {
3104 __le32 lo;
3105 __le32 hi;
3106};
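/*
 * Illustrative sketch, not part of the HSI: a regpair carries a 64-bit
 * value (typically a DMA address) as little-endian lo/hi halves. The
 * helper name is hypothetical.
 */
static inline void bnx2x_example_set_regpair(struct regpair *rp, u64 addr)
{
	rp->lo = cpu_to_le32((u32)addr);
	rp->hi = cpu_to_le32((u32)(addr >> 32));
}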
3107
3108
3109/*
3110 * Classify rule opcodes in E2/E3
3111 */
3112enum classify_rule {
3113 CLASSIFY_RULE_OPCODE_MAC,
3114 CLASSIFY_RULE_OPCODE_VLAN,
3115 CLASSIFY_RULE_OPCODE_PAIR,
3116 MAX_CLASSIFY_RULE
3117};
3118
3119
3120/*
3121 * Classify rule types in E2/E3
3122 */
3123enum classify_rule_action_type {
3124 CLASSIFY_RULE_REMOVE,
3125 CLASSIFY_RULE_ADD,
3126 MAX_CLASSIFY_RULE_ACTION_TYPE
3127};
3128
3129
3130/*
3131 * client init ramrod data
3132 */
3133struct client_init_general_data {
3134 u8 client_id;
3135 u8 statistics_counter_id;
3136 u8 statistics_en_flg;
3137 u8 is_fcoe_flg;
3138 u8 activate_flg;
3139 u8 sp_client_id;
3140 __le16 mtu;
3141 u8 statistics_zero_flg;
3142 u8 func_id;
3143 u8 cos;
3144 u8 traffic_type;
3145 u32 reserved0;
3146};
3147
3148
3149/*
3150 * client init rx data
3151 */
3152struct client_init_rx_data {
3153 u8 tpa_en;
3154#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0)
3155#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0
3156#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1)
3157#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
3158#define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2)
3159#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2
3160 u8 vmqueue_mode_en_flg;
3161 u8 extra_data_over_sgl_en_flg;
3162 u8 cache_line_alignment_log_size;
3163 u8 enable_dynamic_hc;
3164 u8 max_sges_for_packet;
3165 u8 client_qzone_id;
3166 u8 drop_ip_cs_err_flg;
3167 u8 drop_tcp_cs_err_flg;
3168 u8 drop_ttl0_flg;
3169 u8 drop_udp_cs_err_flg;
3170 u8 inner_vlan_removal_enable_flg;
3171 u8 outer_vlan_removal_enable_flg;
3172 u8 status_block_id;
3173 u8 rx_sb_index_number;
3174 u8 reserved0;
3175 u8 max_tpa_queues;
3176 u8 silent_vlan_removal_flg;
3177 __le16 max_bytes_on_bd;
3178 __le16 sge_buff_size;
3179 u8 approx_mcast_engine_id;
3180 u8 rss_engine_id;
3181 struct regpair bd_page_base;
3182 struct regpair sge_page_base;
3183 struct regpair cqe_page_base;
3184 u8 is_leading_rss;
3185 u8 is_approx_mcast;
3186 __le16 max_agg_size;
3187 __le16 state;
3188#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0)
3189#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0
3190#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1)
3191#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1
3192#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2)
3193#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2
3194#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3)
3195#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3
3196#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4)
3197#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4
3198#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5)
3199#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5
3200#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6)
3201#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6
3202#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7)
3203#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7
3204 __le16 cqe_pause_thr_low;
3205 __le16 cqe_pause_thr_high;
3206 __le16 bd_pause_thr_low;
3207 __le16 bd_pause_thr_high;
3208 __le16 sge_pause_thr_low;
3209 __le16 sge_pause_thr_high;
3210 __le16 rx_cos_mask;
3211 __le16 silent_vlan_value;
3212 __le16 silent_vlan_mask;
3213 __le32 reserved6[2];
3214};
3215
3216/*
3217 * client init tx data
3218 */
3219struct client_init_tx_data {
3220 u8 enforce_security_flg;
3221 u8 tx_status_block_id;
3222 u8 tx_sb_index_number;
3223 u8 tss_leading_client_id;
3224 u8 tx_switching_flg;
3225 u8 anti_spoofing_flg;
3226 __le16 default_vlan;
3227 struct regpair tx_bd_page_base;
3228 __le16 state;
3229#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0)
3230#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0
3231#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1)
3232#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1
3233#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2)
3234#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
3235#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
3236#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
3237#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
3238#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
3239 u8 default_vlan_flg;
3240 u8 reserved2;
3241 __le32 reserved3;
3242};
3243
3244/*
3245 * client init ramrod data
3246 */
3247struct client_init_ramrod_data {
3248 struct client_init_general_data general;
3249 struct client_init_rx_data rx;
3250 struct client_init_tx_data tx;
3251};
3252
3253
3254/*
3255 * client update ramrod data
3256 */
3257struct client_update_ramrod_data {
3258 u8 client_id;
3259 u8 func_id;
3260 u8 inner_vlan_removal_enable_flg;
3261 u8 inner_vlan_removal_change_flg;
3262 u8 outer_vlan_removal_enable_flg;
3263 u8 outer_vlan_removal_change_flg;
3264 u8 anti_spoofing_enable_flg;
3265 u8 anti_spoofing_change_flg;
3266 u8 activate_flg;
3267 u8 activate_change_flg;
3268 __le16 default_vlan;
3269 u8 default_vlan_enable_flg;
3270 u8 default_vlan_change_flg;
3271 __le16 silent_vlan_value;
3272 __le16 silent_vlan_mask;
3273 u8 silent_vlan_removal_flg;
3274 u8 silent_vlan_change_flg;
3275 __le32 echo;
3276};
3277
3278
3279/*
3280 * The eth storm context of Cstorm
3281 */
3282struct cstorm_eth_st_context {
3283 u32 __reserved0[4];
3284};
3285
3286
3287struct double_regpair {
3288 u32 regpair0_lo;
3289 u32 regpair0_hi;
3290 u32 regpair1_lo;
3291 u32 regpair1_hi;
3292};
3293
3294
3295/*
3296 * Ethernet address types, used in ethernet tx BDs
3297 */
3298enum eth_addr_type {
3299 UNKNOWN_ADDRESS,
3300 UNICAST_ADDRESS,
3301 MULTICAST_ADDRESS,
3302 BROADCAST_ADDRESS,
3303 MAX_ETH_ADDR_TYPE
3304};
3305
3306
3307/*
3308 * header for an eth classification rule command
3309 */
3310struct eth_classify_cmd_header {
3311 u8 cmd_general_data;
3312#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0)
3313#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0
3314#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1)
3315#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1
3316#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2)
3317#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2
3318#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4)
3319#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4
3320#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5)
3321#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5
3322 u8 func_id;
3323 u8 client_id;
3324 u8 reserved1;
3325};
3326
3327
3328/*
3329 * header for eth classification config ramrod
3330 */
3331struct eth_classify_header {
3332 u8 rule_cnt;
3333 u8 reserved0;
3334 __le16 reserved1;
3335 __le32 echo;
3336};
3337
3338
3339/*
3340 * Command for adding/removing a MAC classification rule
3341 */
3342struct eth_classify_mac_cmd {
3343 struct eth_classify_cmd_header header;
3344 __le32 reserved0;
3345 __le16 mac_lsb;
3346 __le16 mac_mid;
3347 __le16 mac_msb;
3348 __le16 reserved1;
3349};
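/*
 * Illustrative sketch, not part of the HSI: packing a 6-byte MAC address
 * into the three 16-bit words of the command. The msb/mid/lsb byte order
 * used here is an assumption; the real driver has its own conversion
 * helper, and the function name is hypothetical.
 */
static inline void bnx2x_example_fill_mac(struct eth_classify_mac_cmd *cmd,
					  const u8 *mac)
{
	cmd->mac_msb = cpu_to_le16((mac[0] << 8) | mac[1]); /* bytes 0-1 */
	cmd->mac_mid = cpu_to_le16((mac[2] << 8) | mac[3]); /* bytes 2-3 */
	cmd->mac_lsb = cpu_to_le16((mac[4] << 8) | mac[5]); /* bytes 4-5 */
}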
3350
3351
3352/*
3353 * Command for adding/removing a MAC-VLAN pair classification rule
3354 */
3355struct eth_classify_pair_cmd {
3356 struct eth_classify_cmd_header header;
3357 __le32 reserved0;
3358 __le16 mac_lsb;
3359 __le16 mac_mid;
3360 __le16 mac_msb;
3361 __le16 vlan;
3362};
3363
3364
3365/*
3366 * Command for adding/removing a VLAN classification rule
3367 */
3368struct eth_classify_vlan_cmd {
3369 struct eth_classify_cmd_header header;
3370 __le32 reserved0;
3371 __le32 reserved1;
3372 __le16 reserved2;
3373 __le16 vlan;
3374};
3375
3376/*
3377 * union for eth classification rule
3378 */
3379union eth_classify_rule_cmd {
3380 struct eth_classify_mac_cmd mac;
3381 struct eth_classify_vlan_cmd vlan;
3382 struct eth_classify_pair_cmd pair;
3383};
3384
3385/*
3386 * parameters for eth classification configuration ramrod
3387 */
3388struct eth_classify_rules_ramrod_data {
3389 struct eth_classify_header header;
3390 union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
3391};
3392
3393
3394/*
3395 * The data contains the client ID needed by the ramrod
3396 */
3397struct eth_common_ramrod_data {
3398 __le32 client_id;
3399 __le32 reserved1;
3400};
3401
3402
3403/*
3404 * The eth storm context of Ustorm
3405 */
3406struct ustorm_eth_st_context {
3407 u32 reserved0[52];
3408};
3409
3410/*
3411 * The eth storm context of Tstorm
3412 */
3413struct tstorm_eth_st_context {
3414 u32 __reserved0[28];
3415};
3416
3417/*
3418 * The eth storm context of Xstorm
3419 */
3420struct xstorm_eth_st_context {
3421 u32 reserved0[60];
3422};
3423
3424/*
3425 * Ethernet connection context
3426 */
3427struct eth_context {
3428 struct ustorm_eth_st_context ustorm_st_context;
3429 struct tstorm_eth_st_context tstorm_st_context;
3430 struct xstorm_eth_ag_context xstorm_ag_context;
3431 struct tstorm_eth_ag_context tstorm_ag_context;
3432 struct cstorm_eth_ag_context cstorm_ag_context;
3433 struct ustorm_eth_ag_context ustorm_ag_context;
3434 struct timers_block_context timers_context;
3435 struct xstorm_eth_st_context xstorm_st_context;
3436 struct cstorm_eth_st_context cstorm_st_context;
3437};
3438
3439
3440/*
3441 * union for sgl and raw data.
3442 */
3443union eth_sgl_or_raw_data {
3444 __le16 sgl[8];
3445 u32 raw_data[4];
3446};
3447
3448/*
3449 * eth FP end aggregation CQE parameters struct
3450 */
3451struct eth_end_agg_rx_cqe {
3452 u8 type_error_flags;
3453#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0)
3454#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0
3455#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2)
3456#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2
3457#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3)
3458#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3
3459 u8 reserved1;
3460 u8 queue_index;
3461 u8 reserved2;
3462 __le32 timestamp_delta;
3463 __le16 num_of_coalesced_segs;
3464 __le16 pkt_len;
3465 u8 pure_ack_count;
3466 u8 reserved3;
3467 __le16 reserved4;
3468 union eth_sgl_or_raw_data sgl_or_raw_data;
3469 __le32 reserved5[8];
3470};
3471
3472
3473/*
3474 * regular eth FP CQE parameters struct
3475 */
3476struct eth_fast_path_rx_cqe {
3477 u8 type_error_flags;
3478#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0)
3479#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0
3480#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2)
3481#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2
3482#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3)
3483#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3
3484#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4)
3485#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4
3486#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5)
3487#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5
3488#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6)
3489#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
3490 u8 status_flags;
3491#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
3492#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
3493#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3)
3494#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3
3495#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4)
3496#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4
3497#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5)
3498#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5
3499#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6)
3500#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6
3501#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7)
3502#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7
3503 u8 queue_index;
3504 u8 placement_offset;
3505 __le32 rss_hash_result;
3506 __le16 vlan_tag;
3507 __le16 pkt_len;
3508 __le16 len_on_bd;
3509 struct parsing_flags pars_flags;
3510 union eth_sgl_or_raw_data sgl_or_raw_data;
3511 __le32 reserved1[8];
3512};
3513
3514
3515/*
3516 * Command for setting classification flags for a client
3517 */
3518struct eth_filter_rules_cmd {
3519 u8 cmd_general_data;
3520#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0)
3521#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0
3522#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1)
3523#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1
3524#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2)
3525#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2
3526 u8 func_id;
3527 u8 client_id;
3528 u8 reserved1;
3529 __le16 state;
3530#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0)
3531#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0
3532#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1)
3533#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1
3534#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2)
3535#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2
3536#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3)
3537#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3
3538#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4)
3539#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4
3540#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5)
3541#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5
3542#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6)
3543#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6
3544#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7)
3545#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7
3546 __le16 reserved3;
3547 struct regpair reserved4;
3548};
3549
3550
3551/*
3552 * parameters for eth classification filters ramrod
3553 */
3554struct eth_filter_rules_ramrod_data {
3555 struct eth_classify_header header;
3556 struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT];
3557};
3558
3559
3560/*
3561 * parameters for eth classification configuration ramrod
3562 */
3563struct eth_general_rules_ramrod_data {
3564 struct eth_classify_header header;
3565 union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
3566};
3567
3568
3569/*
3570 * The data for Halt ramrod
3571 */
3572struct eth_halt_ramrod_data {
3573 __le32 client_id;
3574 __le32 reserved0;
3575};
3576
3577
3578/*
3579 * Command for setting multicast classification for a client
3580 */
3581struct eth_multicast_rules_cmd {
3582 u8 cmd_general_data;
3583#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
3584#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0
3585#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1)
3586#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1
3587#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2)
3588#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2
3589#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3)
3590#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3
3591 u8 func_id;
3592 u8 bin_id;
3593 u8 engine_id;
3594 __le32 reserved2;
3595 struct regpair reserved3;
3596};
3597
3598
3599/*
3600 * parameters for multicast classification ramrod
3601 */
3602struct eth_multicast_rules_ramrod_data {
3603 struct eth_classify_header header;
3604 struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
3605};
3606
3607
3608/*
3609 * Place holder for ramrods protocol specific data
3610 */
3611struct ramrod_data {
3612 __le32 data_lo;
3613 __le32 data_hi;
3614};
3615
3616/*
3617 * union for ramrod data for Ethernet protocol (CQE) (force size of 64 bits)
3618 */
3619union eth_ramrod_data {
3620 struct ramrod_data general;
3621};
3622
3623
3624/*
3625 * RSS Toeplitz hash type, as reported in the CQE
3626 */
3627enum eth_rss_hash_type {
3628 DEFAULT_HASH_TYPE,
3629 IPV4_HASH_TYPE,
3630 TCP_IPV4_HASH_TYPE,
3631 IPV6_HASH_TYPE,
3632 TCP_IPV6_HASH_TYPE,
3633 VLAN_PRI_HASH_TYPE,
3634 E1HOV_PRI_HASH_TYPE,
3635 DSCP_HASH_TYPE,
3636 MAX_ETH_RSS_HASH_TYPE
3637};
3638
3639
3640/*
3641 * Ethernet RSS mode
3642 */
3643enum eth_rss_mode {
3644 ETH_RSS_MODE_DISABLED,
3645 ETH_RSS_MODE_REGULAR,
3646 ETH_RSS_MODE_VLAN_PRI,
3647 ETH_RSS_MODE_E1HOV_PRI,
3648 ETH_RSS_MODE_IP_DSCP,
3649 MAX_ETH_RSS_MODE
3650};
3651
3652
3653/*
3654 * parameters for RSS update ramrod (E2)
3655 */
3656struct eth_rss_update_ramrod_data {
3657 u8 rss_engine_id;
3658 u8 capabilities;
3659#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0)
3660#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0
3661#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1)
3662#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1
3663#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2)
3664#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2
3665#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3)
3666#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3
3667#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4)
3668#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
3669#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
3670#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
3671#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6)
3672#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6
3673#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7)
3674#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7
3675 u8 rss_result_mask;
3676 u8 rss_mode;
3677 __le32 __reserved2;
3678 u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
3679 __le32 rss_key[T_ETH_RSS_KEY];
3680 __le32 echo;
3681 __le32 reserved3;
3682};
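/*
 * Illustrative sketch, not part of the HSI: spreading num_queues Rx queues
 * (num_queues > 0) evenly across the RSS indirection table.
 * T_ETH_INDIRECTION_TABLE_SIZE is defined elsewhere in the firmware
 * headers; the helper name is hypothetical.
 */
static inline void
bnx2x_example_fill_ind_table(struct eth_rss_update_ramrod_data *data,
			     u8 num_queues)
{
	int i;

	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
		data->indirection_table[i] = i % num_queues;
}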
3683
3684
3685/*
3686 * The eth Rx Buffer Descriptor
3687 */
3688struct eth_rx_bd {
3689 __le32 addr_lo;
3690 __le32 addr_hi;
3691};
3692
3693
3694/*
3695 * Eth Rx Cqe structure- general structure for ramrods
3696 */
3697struct common_ramrod_eth_rx_cqe {
3698 u8 ramrod_type;
3699#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0)
3700#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
3701#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2)
3702#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2
3703#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3)
3704#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3
3705 u8 conn_type;
3706 __le16 reserved1;
3707 __le32 conn_and_cmd_data;
3708#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0)
3709#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0
3710#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24)
3711#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24
3712 struct ramrod_data protocol_data;
3713 __le32 echo;
3714 __le32 reserved2[11];
3715};
3716
3717/*
3718 * Rx Last CQE in page (in ETH)
3719 */
3720struct eth_rx_cqe_next_page {
3721 __le32 addr_lo;
3722 __le32 addr_hi;
3723 __le32 reserved[14];
3724};
3725
3726/*
3727 * union for all eth Rx CQE types (forces all types to the same size)
3728 */
3729union eth_rx_cqe {
3730 struct eth_fast_path_rx_cqe fast_path_cqe;
3731 struct common_ramrod_eth_rx_cqe ramrod_cqe;
3732 struct eth_rx_cqe_next_page next_page_cqe;
3733 struct eth_end_agg_rx_cqe end_agg_cqe;
3734};
3735
3736
3737/*
3738 * Values for RX ETH CQE type field
3739 */
3740enum eth_rx_cqe_type {
3741 RX_ETH_CQE_TYPE_ETH_FASTPATH,
3742 RX_ETH_CQE_TYPE_ETH_RAMROD,
3743 RX_ETH_CQE_TYPE_ETH_START_AGG,
3744 RX_ETH_CQE_TYPE_ETH_STOP_AGG,
3745 MAX_ETH_RX_CQE_TYPE
3746};
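/*
 * Illustrative sketch, not part of the HSI: the Rx handler reads the 2-bit
 * CQE type first and only then decides which union member is valid; the
 * value corresponds to enum eth_rx_cqe_type above. The helper name is
 * hypothetical.
 */
static inline u8 bnx2x_example_cqe_type(const union eth_rx_cqe *cqe)
{
	return (cqe->fast_path_cqe.type_error_flags &
		ETH_FAST_PATH_RX_CQE_TYPE) >>
	       ETH_FAST_PATH_RX_CQE_TYPE_SHIFT;
}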
3747
3748
3749/*
3750 * Type of SGL/Raw field in ETH RX fast path CQE
3751 */
3752enum eth_rx_fp_sel {
3753 ETH_FP_CQE_REGULAR,
3754 ETH_FP_CQE_RAW,
3755 MAX_ETH_RX_FP_SEL
3756};
3757
3758
3759/*
3760 * The eth Rx SGE Descriptor
3761 */
3762struct eth_rx_sge {
3763 __le32 addr_lo;
3764 __le32 addr_hi;
3765};
3766
3767
3768/*
3769 * common data for all protocols
3770 */
3771struct spe_hdr {
3772 __le32 conn_and_cmd_data;
3773#define SPE_HDR_CID (0xFFFFFF<<0)
3774#define SPE_HDR_CID_SHIFT 0
3775#define SPE_HDR_CMD_ID (0xFF<<24)
3776#define SPE_HDR_CMD_ID_SHIFT 24
3777 __le16 type;
3778#define SPE_HDR_CONN_TYPE (0xFF<<0)
3779#define SPE_HDR_CONN_TYPE_SHIFT 0
3780#define SPE_HDR_FUNCTION_ID (0xFF<<8)
3781#define SPE_HDR_FUNCTION_ID_SHIFT 8
3782 __le16 reserved1;
3783};
3784
3785/*
3786 * specific data for ethernet slow path element
3787 */
3788union eth_specific_data {
3789 u8 protocol_data[8];
3790 struct regpair client_update_ramrod_data;
3791 struct regpair client_init_ramrod_init_data;
3792 struct eth_halt_ramrod_data halt_ramrod_data;
3793 struct regpair update_data_addr;
3794 struct eth_common_ramrod_data common_ramrod_data;
3795 struct regpair classify_cfg_addr;
3796 struct regpair filter_cfg_addr;
3797 struct regpair mcast_cfg_addr;
3798};
3799
3800/*
3801 * Ethernet slow path element
3802 */
3803struct eth_spe {
3804 struct spe_hdr hdr;
3805 union eth_specific_data data;
3806};
3807
3808
3809/*
3810 * Ethernet command ID for slow path elements
3811 */
3812enum eth_spqe_cmd_id {
3813 RAMROD_CMD_ID_ETH_UNUSED,
3814 RAMROD_CMD_ID_ETH_CLIENT_SETUP,
3815 RAMROD_CMD_ID_ETH_HALT,
3816 RAMROD_CMD_ID_ETH_FORWARD_SETUP,
3817 RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP,
3818 RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
3819 RAMROD_CMD_ID_ETH_EMPTY,
3820 RAMROD_CMD_ID_ETH_TERMINATE,
3821 RAMROD_CMD_ID_ETH_TPA_UPDATE,
3822 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES,
3823 RAMROD_CMD_ID_ETH_FILTER_RULES,
3824 RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3825 RAMROD_CMD_ID_ETH_RSS_UPDATE,
3826 RAMROD_CMD_ID_ETH_SET_MAC,
3827 MAX_ETH_SPQE_CMD_ID
3828};
3829
3830
3831/*
3832 * eth tpa update command
3833 */
3834enum eth_tpa_update_command {
3835 TPA_UPDATE_NONE_COMMAND,
3836 TPA_UPDATE_ENABLE_COMMAND,
3837 TPA_UPDATE_DISABLE_COMMAND,
3838 MAX_ETH_TPA_UPDATE_COMMAND
3839};
3840
3841
3842/*
3843 * Tx regular BD structure
3844 */
3845struct eth_tx_bd {
3846 __le32 addr_lo;
3847 __le32 addr_hi;
3848 __le16 total_pkt_bytes;
3849 __le16 nbytes;
3850 u8 reserved[4];
3851};
3852
3853
3854/*
3855 * structure for easy access from assembler code
3856 */
3857struct eth_tx_bd_flags {
3858 u8 as_bitfield;
3859#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0)
3860#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0
3861#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1)
3862#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1
3863#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2)
3864#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2
3865#define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
3866#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
3867#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5)
3868#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5
3869#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6)
3870#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
3871#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7)
3872#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7
3873};
3874
3875/*
3876 * The eth Tx Buffer Descriptor
3877 */
3878struct eth_tx_start_bd {
3879 __le32 addr_lo;
3880 __le32 addr_hi;
3881 __le16 nbd;
3882 __le16 nbytes;
3883 __le16 vlan_or_ethertype;
3884 struct eth_tx_bd_flags bd_flags;
3885 u8 general_data;
3886#define ETH_TX_START_BD_HDR_NBDS (0xF<<0)
3887#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
3888#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
3889#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
3890#define ETH_TX_START_BD_RESREVED (0x1<<5)
3891#define ETH_TX_START_BD_RESREVED_SHIFT 5
3892#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6)
3893#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6
3894};
3895
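A hedged sketch of marking a start BD for a checksummed TCP/IPv4 frame (the real transmit path derives all of this from the skb; the values here are illustrative only):

	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_START_BD |
					     ETH_TX_BD_FLAGS_IP_CSUM |
					     ETH_TX_BD_FLAGS_L4_CSUM;
	/* the packet header occupies one BD */
	tx_start_bd->general_data |= 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
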
3896/*
3897 * Tx parsing BD structure for ETH E1/E1h
3898 */
3899struct eth_tx_parse_bd_e1x {
3900 u8 global_data;
3901#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
3902#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
3903#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4)
3904#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4
3905#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
3906#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
3907#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6)
3908#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6
3909#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7)
3910#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7
3911 u8 tcp_flags;
3912#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
3913#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
3914#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1)
3915#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1
3916#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2)
3917#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2
3918#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3)
3919#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3
3920#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4)
3921#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4
3922#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5)
3923#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5
3924#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6)
3925#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6
3926#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
3927#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
3928 u8 ip_hlen_w;
3929 s8 reserved;
3930 __le16 total_hlen_w;
3931 __le16 tcp_pseudo_csum;
3932 __le16 lso_mss;
3933 __le16 ip_id;
3934 __le32 tcp_send_seq;
3935};
3936
3937/*
3938 * Tx parsing BD structure for ETH E2
3939 */
3940struct eth_tx_parse_bd_e2 {
3941 __le16 dst_mac_addr_lo;
3942 __le16 dst_mac_addr_mid;
3943 __le16 dst_mac_addr_hi;
3944 __le16 src_mac_addr_lo;
3945 __le16 src_mac_addr_mid;
3946 __le16 src_mac_addr_hi;
3947 __le32 parsing_data;
3948#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
3949#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
3950#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
3951#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
3952#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
3953#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
3954#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
3955#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
3956};
3957
3958/*
3959 * The last BD in the BD memory will hold a pointer to the next BD memory
3960 */
3961struct eth_tx_next_bd {
3962 __le32 addr_lo;
3963 __le32 addr_hi;
3964 u8 reserved[8];
3965};
3966
3967/*
3968 * union for 4 Bd types
3969 */
3970union eth_tx_bd_types {
3971 struct eth_tx_start_bd start_bd;
3972 struct eth_tx_bd reg_bd;
3973 struct eth_tx_parse_bd_e1x parse_bd_e1x;
3974 struct eth_tx_parse_bd_e2 parse_bd_e2;
3975 struct eth_tx_next_bd next_bd;
3976};
3977
3978/*
3979 * array of 13 BDs as it appears in the eth Xstorm context
3980 */
3981struct eth_tx_bds_array {
3982 union eth_tx_bd_types bds[13];
3983};
3984
3985
3986/*
3987 * VLAN mode on TX BDs
3988 */
3989enum eth_tx_vlan_type {
3990 X_ETH_NO_VLAN,
3991 X_ETH_OUTBAND_VLAN,
3992 X_ETH_INBAND_VLAN,
3993 X_ETH_FW_ADDED_VLAN,
3994 MAX_ETH_TX_VLAN_TYPE
3995};
3996
3997
3998/*
3999 * Ethernet VLAN filtering mode in E1x
4000 */
4001enum eth_vlan_filter_mode {
4002 ETH_VLAN_FILTER_ANY_VLAN,
4003 ETH_VLAN_FILTER_SPECIFIC_VLAN,
4004 ETH_VLAN_FILTER_CLASSIFY,
4005 MAX_ETH_VLAN_FILTER_MODE
4006};
4007
4008
4009/*
4010 * MAC filtering configuration command header
4011 */
4012struct mac_configuration_hdr {
4013 u8 length;
4014 u8 offset;
4015 __le16 client_id;
4016 __le32 echo;
4017};
4018
4019/*
4020 * MAC address in list for ramrod
4021 */
4022struct mac_configuration_entry {
4023 __le16 lsb_mac_addr;
4024 __le16 middle_mac_addr;
4025 __le16 msb_mac_addr;
4026 __le16 vlan_id;
4027 u8 pf_id;
4028 u8 flags;
4029#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0)
4030#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0
4031#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1)
4032#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1
4033#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2)
4034#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2
4035#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4)
4036#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4
4037#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5)
4038#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
4039#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6)
4040#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6
4041 __le16 reserved0;
4042 __le32 clients_bit_vector;
4043};
4044
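The three little-endian halves appear to pack the MAC address most-significant word first (inferred from the lsb/middle/msb naming; the driver has its own helper for this). A sketch, given u8 mac[6] in network order:

	entry->msb_mac_addr    = cpu_to_le16((mac[0] << 8) | mac[1]);
	entry->middle_mac_addr = cpu_to_le16((mac[2] << 8) | mac[3]);
	entry->lsb_mac_addr    = cpu_to_le16((mac[4] << 8) | mac[5]);
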
4045/*
4046 * MAC filtering configuration command
4047 */
4048struct mac_configuration_cmd {
4049 struct mac_configuration_hdr hdr;
4050 struct mac_configuration_entry config_table[64];
4051};
4052
4053
4054/*
4055 * Set-MAC command type (in E1x)
4056 */
4057enum set_mac_action_type {
4058 T_ETH_MAC_COMMAND_INVALIDATE,
4059 T_ETH_MAC_COMMAND_SET,
4060 MAX_SET_MAC_ACTION_TYPE
4061};
4062
4063
4064/*
4065 * tpa update ramrod data
4066 */
4067struct tpa_update_ramrod_data {
4068 u8 update_ipv4;
4069 u8 update_ipv6;
4070 u8 client_id;
4071 u8 max_tpa_queues;
4072 u8 max_sges_for_packet;
4073 u8 complete_on_both_clients;
4074 __le16 reserved1;
4075 __le16 sge_buff_size;
4076 __le16 max_agg_size;
4077 __le32 sge_page_base_lo;
4078 __le32 sge_page_base_hi;
4079 __le16 sge_pause_thr_low;
4080 __le16 sge_pause_thr_high;
4081};
4082
4083
4084/*
4085 * approximate-match multicast filtering for E1H per function in Tstorm
4086 */
4087struct tstorm_eth_approximate_match_multicast_filtering {
4088 u32 mcast_add_hash_bit_array[8];
4089};
4090
4091
4092/*
4093 * Common configuration parameters per function in Tstorm
4094 */
4095struct tstorm_eth_function_common_config {
4096 __le16 config_flags;
4097#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
4098#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
4099#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
4100#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
4101#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
4102#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
4103#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
4104#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
4105#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
4106#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
4107#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7)
4108#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7
4109#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8)
4110#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8
4111 u8 rss_result_mask;
4112 u8 reserved1;
4113 __le16 vlan_id[2];
4114};
4115
4116
4117/*
4118 * MAC filtering configuration parameters per port in Tstorm
4119 */
4120struct tstorm_eth_mac_filter_config {
4121 __le32 ucast_drop_all;
4122 __le32 ucast_accept_all;
4123 __le32 mcast_drop_all;
4124 __le32 mcast_accept_all;
4125 __le32 bcast_accept_all;
4126 __le32 vlan_filter[2];
4127 __le32 unmatched_unicast;
4128};
4129
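Each field acts as a per-port enable written to Tstorm RAM (semantics inferred from the field names). A hedged sketch of a promiscuous-style setting:

	struct tstorm_eth_mac_filter_config mac_filters = {0};

	mac_filters.ucast_accept_all = cpu_to_le32(1); /* all unicast */
	mac_filters.mcast_accept_all = cpu_to_le32(1); /* all multicast */
	mac_filters.bcast_accept_all = cpu_to_le32(1); /* broadcast */
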
4130
4131/*
4132 * tx only queue init ramrod data
4133 */
4134struct tx_queue_init_ramrod_data {
4135 struct client_init_general_data general;
4136 struct client_init_tx_data tx;
4137};
4138
4139
4140/*
4141 * Three RX producers for ETH
4142 */
4143struct ustorm_eth_rx_producers {
4144#if defined(__BIG_ENDIAN)
4145 u16 bd_prod;
4146 u16 cqe_prod;
4147#elif defined(__LITTLE_ENDIAN)
4148 u16 cqe_prod;
4149 u16 bd_prod;
4150#endif
4151#if defined(__BIG_ENDIAN)
4152 u16 reserved;
4153 u16 sge_prod;
4154#elif defined(__LITTLE_ENDIAN)
4155 u16 sge_prod;
4156 u16 reserved;
4157#endif
4158};
4159
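The endian-conditional layout keeps the three producers at fixed byte offsets in Ustorm RAM. A hedged sketch of publishing new values (the memory barriers and the u32-by-u32 copy into the queue's Ustorm zone that the real driver performs are omitted):

	struct ustorm_eth_rx_producers rx_prods = {0};

	rx_prods.bd_prod  = bd_prod;	/* next free Rx BD */
	rx_prods.cqe_prod = cqe_prod;	/* next free CQE */
	rx_prods.sge_prod = sge_prod;	/* next free SGE */
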
4160
4161/*
4162 * cfc delete event data
4163 */
4164struct cfc_del_event_data {
4165 u32 cid;
4166 u32 reserved0;
4167 u32 reserved1;
4168};
4169
4170
4171/*
4172 * per-port SAFC demo variables
4173 */
4174struct cmng_flags_per_port {
4175 u32 cmng_enables;
4176#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0)
4177#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0
4178#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1)
4179#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1
4180#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2)
4181#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2
4182#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3)
4183#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3
4184#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4)
4185#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4
4186 u32 __reserved1;
4187};
4188
4189
4190/*
4191 * per-port rate shaping variables
4192 */
4193struct rate_shaping_vars_per_port {
4194 u32 rs_periodic_timeout;
4195 u32 rs_threshold;
4196};
4197
4198/*
4199 * per-port fairness variables
4200 */
4201struct fairness_vars_per_port {
4202 u32 upper_bound;
4203 u32 fair_threshold;
4204 u32 fairness_timeout;
4205 u32 reserved0;
4206};
4207
4208/*
4209 * per-port SAFC variables
4210 */
4211struct safc_struct_per_port {
4212#if defined(__BIG_ENDIAN)
4213 u16 __reserved1;
4214 u8 __reserved0;
4215 u8 safc_timeout_usec;
4216#elif defined(__LITTLE_ENDIAN)
4217 u8 safc_timeout_usec;
4218 u8 __reserved0;
4219 u16 __reserved1;
4220#endif
4221 u8 cos_to_traffic_types[MAX_COS_NUMBER];
4222 u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
4223};
4224
4225/*
4226 * Per-port congestion management variables
4227 */
4228struct cmng_struct_per_port {
4229 struct rate_shaping_vars_per_port rs_vars;
4230 struct fairness_vars_per_port fair_vars;
4231 struct safc_struct_per_port safc_vars;
4232 struct cmng_flags_per_port flags;
4233};
4234
4235
4236/*
4237 * Protocol-common command ID for slow path elements
4238 */
4239enum common_spqe_cmd_id {
4240 RAMROD_CMD_ID_COMMON_UNUSED,
4241 RAMROD_CMD_ID_COMMON_FUNCTION_START,
4242 RAMROD_CMD_ID_COMMON_FUNCTION_STOP,
4243 RAMROD_CMD_ID_COMMON_CFC_DEL,
4244 RAMROD_CMD_ID_COMMON_CFC_DEL_WB,
4245 RAMROD_CMD_ID_COMMON_STAT_QUERY,
4246 RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
4247 RAMROD_CMD_ID_COMMON_START_TRAFFIC,
4248 RAMROD_CMD_ID_COMMON_RESERVED1,
4249 RAMROD_CMD_ID_COMMON_RESERVED2,
4250 MAX_COMMON_SPQE_CMD_ID
4251};
4252
4253
4254/*
4255 * Per-protocol connection types
4256 */
4257enum connection_type {
4258 ETH_CONNECTION_TYPE,
4259 TOE_CONNECTION_TYPE,
4260 RDMA_CONNECTION_TYPE,
4261 ISCSI_CONNECTION_TYPE,
4262 FCOE_CONNECTION_TYPE,
4263 RESERVED_CONNECTION_TYPE_0,
4264 RESERVED_CONNECTION_TYPE_1,
4265 RESERVED_CONNECTION_TYPE_2,
4266 NONE_CONNECTION_TYPE,
4267 MAX_CONNECTION_TYPE
4268};
4269
4270
4271/*
4272 * Cos modes
4273 */
4274enum cos_mode {
4275 OVERRIDE_COS,
4276 STATIC_COS,
4277 FW_WRR,
4278 MAX_COS_MODE
4279};
4280
4281
4282/*
4283 * Dynamic HC counters set by the driver
4284 */
4285struct hc_dynamic_drv_counter {
4286 u32 val[HC_SB_MAX_DYNAMIC_INDICES];
4287};
4288
4289/*
4290 * zone A per-queue data
4291 */
4292struct cstorm_queue_zone_data {
4293 struct hc_dynamic_drv_counter hc_dyn_drv_cnt;
4294 struct regpair reserved[2];
4295};
4296
4297
4298/*
4299 * Vf-PF channel data in cstorm ram (non-triggered zone)
4300 */
4301struct vf_pf_channel_zone_data {
4302 u32 msg_addr_lo;
4303 u32 msg_addr_hi;
4304};
4305
4306/*
4307 * zone for VF non-triggered data
4308 */
4309struct non_trigger_vf_zone {
4310 struct vf_pf_channel_zone_data vf_pf_channel;
4311};
4312
4313/*
4314 * Vf-PF channel trigger zone in cstorm ram
4315 */
4316struct vf_pf_channel_zone_trigger {
4317 u8 addr_valid;
4318};
4319
4320/*
4321 * zone that triggers the in-bound interrupt
4322 */
4323struct trigger_vf_zone {
4324#if defined(__BIG_ENDIAN)
4325 u16 reserved1;
4326 u8 reserved0;
4327 struct vf_pf_channel_zone_trigger vf_pf_channel;
4328#elif defined(__LITTLE_ENDIAN)
4329 struct vf_pf_channel_zone_trigger vf_pf_channel;
4330 u8 reserved0;
4331 u16 reserved1;
4332#endif
4333 u32 reserved2;
4334};
4335
4336/*
4337 * zone B per-VF data
4338 */
4339struct cstorm_vf_zone_data {
4340 struct non_trigger_vf_zone non_trigger;
4341 struct trigger_vf_zone trigger;
4342};
4343
4344
4345/*
4346 * Dynamic host coalescing init parameters, per state machine
4347 */
4348struct dynamic_hc_sm_config {
4349 u32 threshold[3];
4350 u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES];
4351 u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES];
4352 u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES];
4353 u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES];
4354 u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES];
4355};
4356
4357/*
4358 * Dynamic host coalescing init parameters
4359 */
4360struct dynamic_hc_config {
4361 struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM];
4362};
4363
4364
4365struct e2_integ_data {
4366#if defined(__BIG_ENDIAN)
4367 u8 flags;
4368#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
4369#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
4370#define E2_INTEG_DATA_LB_TX (0x1<<1)
4371#define E2_INTEG_DATA_LB_TX_SHIFT 1
4372#define E2_INTEG_DATA_COS_TX (0x1<<2)
4373#define E2_INTEG_DATA_COS_TX_SHIFT 2
4374#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
4375#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
4376#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
4377#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
4378#define E2_INTEG_DATA_RESERVED (0x7<<5)
4379#define E2_INTEG_DATA_RESERVED_SHIFT 5
4380 u8 cos;
4381 u8 voq;
4382 u8 pbf_queue;
4383#elif defined(__LITTLE_ENDIAN)
4384 u8 pbf_queue;
4385 u8 voq;
4386 u8 cos;
4387 u8 flags;
4388#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
4389#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
4390#define E2_INTEG_DATA_LB_TX (0x1<<1)
4391#define E2_INTEG_DATA_LB_TX_SHIFT 1
4392#define E2_INTEG_DATA_COS_TX (0x1<<2)
4393#define E2_INTEG_DATA_COS_TX_SHIFT 2
4394#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
4395#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
4396#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
4397#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
4398#define E2_INTEG_DATA_RESERVED (0x7<<5)
4399#define E2_INTEG_DATA_RESERVED_SHIFT 5
4400#endif
4401#if defined(__BIG_ENDIAN)
4402 u16 reserved3;
4403 u8 reserved2;
4404 u8 ramEn;
4405#elif defined(__LITTLE_ENDIAN)
4406 u8 ramEn;
4407 u8 reserved2;
4408 u16 reserved3;
4409#endif
4410};
4411
4412
4413/*
4414 * set mac event data
4415 */
4416struct eth_event_data {
4417 u32 echo;
4418 u32 reserved0;
4419 u32 reserved1;
4420};
4421
4422
4423/*
4424 * pf-vf event data
4425 */
4426struct vf_pf_event_data {
4427 u8 vf_id;
4428 u8 reserved0;
4429 u16 reserved1;
4430 u32 msg_addr_lo;
4431 u32 msg_addr_hi;
4432};
4433
4434/*
4435 * VF FLR event data
4436 */
4437struct vf_flr_event_data {
4438 u8 vf_id;
4439 u8 reserved0;
4440 u16 reserved1;
4441 u32 reserved2;
4442 u32 reserved3;
4443};
4444
4445/*
4446 * malicious VF event data
4447 */
4448struct malicious_vf_event_data {
4449 u8 vf_id;
4450 u8 reserved0;
4451 u16 reserved1;
4452 u32 reserved2;
4453 u32 reserved3;
4454};
4455
4456/*
4457 * union for all event ring message types
4458 */
4459union event_data {
4460 struct vf_pf_event_data vf_pf_event;
4461 struct eth_event_data eth_event;
4462 struct cfc_del_event_data cfc_del_event;
4463 struct vf_flr_event_data vf_flr_event;
4464 struct malicious_vf_event_data malicious_vf_event;
4465};
4466
4467
4468/*
4469 * per PF event ring data
4470 */
4471struct event_ring_data {
4472 struct regpair base_addr;
4473#if defined(__BIG_ENDIAN)
4474 u8 index_id;
4475 u8 sb_id;
4476 u16 producer;
4477#elif defined(__LITTLE_ENDIAN)
4478 u16 producer;
4479 u8 sb_id;
4480 u8 index_id;
4481#endif
4482 u32 reserved0;
4483};
4484
4485
4486/*
4487 * event ring message element (each element is 128 bits)
4488 */
4489struct event_ring_msg {
4490 u8 opcode;
4491 u8 error;
4492 u16 reserved1;
4493 union event_data data;
4494};
4495
4496/*
4497 * event ring next page element (128 bits)
4498 */
4499struct event_ring_next {
4500 struct regpair addr;
4501 u32 reserved[2];
4502};
4503
4504/*
4505 * union for event ring element types (each element is 128 bits)
4506 */
4507union event_ring_elem {
4508 struct event_ring_msg message;
4509 struct event_ring_next next_page;
4510};
4511
4512
4513/*
4514 * Common event ring opcodes
4515 */
4516enum event_ring_opcode {
4517 EVENT_RING_OPCODE_VF_PF_CHANNEL,
4518 EVENT_RING_OPCODE_FUNCTION_START,
4519 EVENT_RING_OPCODE_FUNCTION_STOP,
4520 EVENT_RING_OPCODE_CFC_DEL,
4521 EVENT_RING_OPCODE_CFC_DEL_WB,
4522 EVENT_RING_OPCODE_STAT_QUERY,
4523 EVENT_RING_OPCODE_STOP_TRAFFIC,
4524 EVENT_RING_OPCODE_START_TRAFFIC,
4525 EVENT_RING_OPCODE_VF_FLR,
4526 EVENT_RING_OPCODE_MALICIOUS_VF,
4527 EVENT_RING_OPCODE_FORWARD_SETUP,
4528 EVENT_RING_OPCODE_RSS_UPDATE_RULES,
4529 EVENT_RING_OPCODE_RESERVED1,
4530 EVENT_RING_OPCODE_RESERVED2,
4531 EVENT_RING_OPCODE_SET_MAC,
4532 EVENT_RING_OPCODE_CLASSIFICATION_RULES,
4533 EVENT_RING_OPCODE_FILTERS_RULES,
4534 EVENT_RING_OPCODE_MULTICAST_RULES,
4535 MAX_EVENT_RING_OPCODE
4536};
4537
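A sketch of consuming one event-ring element (the handler name is hypothetical; the real code also advances the consumer index and follows next_page elements):

	struct event_ring_msg *msg = &elem->message;

	switch (msg->opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		handle_cfc_del(&msg->data.cfc_del_event); /* hypothetical */
		break;
	case EVENT_RING_OPCODE_STAT_QUERY:
		/* a statistics ramrod completed */
		break;
	default:
		break;
	}
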
4538
4539/*
4540 * Modes for fairness algorithm
4541 */
4542enum fairness_mode {
4543 FAIRNESS_COS_WRR_MODE,
4544 FAIRNESS_COS_ETS_MODE,
4545 MAX_FAIRNESS_MODE
4546};
4547
4548
4549/*
4550 * per-vnic fairness variables
4551 */
4552struct fairness_vars_per_vn {
4553 u32 cos_credit_delta[MAX_COS_NUMBER];
4554 u32 vn_credit_delta;
4555 u32 __reserved0;
4556};
4557
4558
4559/*
4560 * Priority and cos
4561 */
4562struct priority_cos {
4563 u8 priority;
4564 u8 cos;
4565 __le16 reserved1;
4566};
4567
4568/*
4569 * The data for flow control configuration
4570 */
4571struct flow_control_configuration {
4572 struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
4573 u8 dcb_enabled;
4574 u8 dcb_version;
4575 u8 dont_add_pri_0_en;
4576 u8 reserved1;
4577 __le32 reserved2;
4578};
4579
4580
4581/*
4582 * FUNCTION_START ramrod data
4583 */
4584struct function_start_data {
4585 __le16 function_mode;
4586 __le16 sd_vlan_tag;
4587 u16 reserved;
4588 u8 path_id;
4589 u8 network_cos_mode;
4590};
4591
4592
4593/*
4594 * FW version stored in the Xstorm RAM
4595 */
4596struct fw_version {
4597#if defined(__BIG_ENDIAN)
4598 u8 engineering;
4599 u8 revision;
4600 u8 minor;
4601 u8 major;
4602#elif defined(__LITTLE_ENDIAN)
4603 u8 major;
4604 u8 minor;
4605 u8 revision;
4606 u8 engineering;
4607#endif
4608 u32 flags;
4609#define FW_VERSION_OPTIMIZED (0x1<<0)
4610#define FW_VERSION_OPTIMIZED_SHIFT 0
4611#define FW_VERSION_BIG_ENDIEN (0x1<<1)
4612#define FW_VERSION_BIG_ENDIEN_SHIFT 1
4613#define FW_VERSION_CHIP_VERSION (0x3<<2)
4614#define FW_VERSION_CHIP_VERSION_SHIFT 2
4615#define __FW_VERSION_RESERVED (0xFFFFFFF<<4)
4616#define __FW_VERSION_RESERVED_SHIFT 4
4617};
4618
4619
4620/*
4621 * Dynamic Host-Coalescing - driver (host) counters
4622 */
4623struct hc_dynamic_sb_drv_counters {
4624 u32 dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES];
4625};
4626
4627
4628/*
4629 * 2 bytes: configuration/state parameters for a single protocol index
4630 */
4631struct hc_index_data {
4632#if defined(__BIG_ENDIAN)
4633 u8 flags;
4634#define HC_INDEX_DATA_SM_ID (0x1<<0)
4635#define HC_INDEX_DATA_SM_ID_SHIFT 0
4636#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
4637#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
4638#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
4639#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
4640#define HC_INDEX_DATA_RESERVE (0x1F<<3)
4641#define HC_INDEX_DATA_RESERVE_SHIFT 3
4642 u8 timeout;
4643#elif defined(__LITTLE_ENDIAN)
4644 u8 timeout;
4645 u8 flags;
4646#define HC_INDEX_DATA_SM_ID (0x1<<0)
4647#define HC_INDEX_DATA_SM_ID_SHIFT 0
4648#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
4649#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
4650#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
4651#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
4652#define HC_INDEX_DATA_RESERVE (0x1F<<3)
4653#define HC_INDEX_DATA_RESERVE_SHIFT 3
4654#endif
4655};
4656
4657
4658/*
4659 * HC state-machine
4660 */
4661struct hc_status_block_sm {
4662#if defined(__BIG_ENDIAN)
4663 u8 igu_seg_id;
4664 u8 igu_sb_id;
4665 u8 timer_value;
4666 u8 __flags;
4667#elif defined(__LITTLE_ENDIAN)
4668 u8 __flags;
4669 u8 timer_value;
4670 u8 igu_sb_id;
4671 u8 igu_seg_id;
4672#endif
4673 u32 time_to_expire;
4674};
4675
4676/*
4677 * holds PCI identification variables - used in various places in the firmware
4678 */
4679struct pci_entity {
4680#if defined(__BIG_ENDIAN)
4681 u8 vf_valid;
4682 u8 vf_id;
4683 u8 vnic_id;
4684 u8 pf_id;
4685#elif defined(__LITTLE_ENDIAN)
4686 u8 pf_id;
4687 u8 vnic_id;
4688 u8 vf_id;
4689 u8 vf_valid;
4690#endif
4691};
4692
4693/*
4694 * The fast-path status block meta-data, common to all chips
4695 */
4696struct hc_sb_data {
4697 struct regpair host_sb_addr;
4698 struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
4699 struct pci_entity p_func;
4700#if defined(__BIG_ENDIAN)
4701 u8 rsrv0;
4702 u8 state;
4703 u8 dhc_qzone_id;
4704 u8 same_igu_sb_1b;
4705#elif defined(__LITTLE_ENDIAN)
4706 u8 same_igu_sb_1b;
4707 u8 dhc_qzone_id;
4708 u8 state;
4709 u8 rsrv0;
4710#endif
4711 struct regpair rsrv1[2];
4712};
4713
4714
4715/*
4716 * Segment types for host coalescing
4717 */
4718enum hc_segment {
4719 HC_REGULAR_SEGMENT,
4720 HC_DEFAULT_SEGMENT,
4721 MAX_HC_SEGMENT
4722};
4723
4724
4725/*
4726 * The slow-path status block meta-data
4727 */
4728struct hc_sp_status_block_data {
4729 struct regpair host_sb_addr;
4730#if defined(__BIG_ENDIAN)
4731 u8 rsrv1;
4732 u8 state;
4733 u8 igu_seg_id;
4734 u8 igu_sb_id;
4735#elif defined(__LITTLE_ENDIAN)
4736 u8 igu_sb_id;
4737 u8 igu_seg_id;
4738 u8 state;
4739 u8 rsrv1;
4740#endif
4741 struct pci_entity p_func;
4742};
4743
4744
4745/*
4746 * The fast-path status block meta-data
4747 */
4748struct hc_status_block_data_e1x {
4749 struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X];
4750 struct hc_sb_data common;
4751};
4752
4753
4754/*
4755 * The fast-path status block meta-data
4756 */
4757struct hc_status_block_data_e2 {
4758 struct hc_index_data index_data[HC_SB_MAX_INDICES_E2];
4759 struct hc_sb_data common;
4760};
4761
4762
4763/*
4764 * IGU block operation modes (in Everest2)
4765 */
4766enum igu_mode {
4767 HC_IGU_BC_MODE,
4768 HC_IGU_NBC_MODE,
4769 MAX_IGU_MODE
4770};
4771
4772
4773/*
4774 * IP versions
4775 */
4776enum ip_ver {
4777 IP_V4,
4778 IP_V6,
4779 MAX_IP_VER
4780};
4781
4782
4783/*
4784 * Multi-function modes
4785 */
4786enum mf_mode {
4787 SINGLE_FUNCTION,
4788 MULTI_FUNCTION_SD,
4789 MULTI_FUNCTION_SI,
4790 MULTI_FUNCTION_RESERVED,
4791 MAX_MF_MODE
4792};
4793
4794/*
4795 * Protocol-common statistics collected by the Tstorm (per pf)
4796 */
4797struct tstorm_per_pf_stats {
4798 struct regpair rcv_error_bytes;
4799};
4800
4801/*
4802 * per-PF statistics
4803 */
4804struct per_pf_stats {
4805 struct tstorm_per_pf_stats tstorm_pf_statistics;
4806};
4807
4808
4809/*
4810 * Protocol-common statistics collected by the Tstorm (per port)
4811 */
4812struct tstorm_per_port_stats {
4813 __le32 mac_discard;
4814 __le32 mac_filter_discard;
4815 __le32 brb_truncate_discard;
4816 __le32 mf_tag_discard;
4817 __le32 packet_drop;
4818 __le32 reserved;
4819};
4820
4821/*
4822 * per-port statistics
4823 */
4824struct per_port_stats {
4825 struct tstorm_per_port_stats tstorm_port_statistics;
4826};
4827
4828
4829/*
4830 * Protocol-common statistics collected by the Tstorm (per client)
4831 */
4832struct tstorm_per_queue_stats {
4833 struct regpair rcv_ucast_bytes;
4834 __le32 rcv_ucast_pkts;
4835 __le32 checksum_discard;
4836 struct regpair rcv_bcast_bytes;
4837 __le32 rcv_bcast_pkts;
4838 __le32 pkts_too_big_discard;
4839 struct regpair rcv_mcast_bytes;
4840 __le32 rcv_mcast_pkts;
4841 __le32 ttl0_discard;
4842 __le16 no_buff_discard;
4843 __le16 reserved0;
4844 __le32 reserved1;
4845};
4846
4847/*
4848 * Protocol-common statistics collected by the Ustorm (per client)
4849 */
4850struct ustorm_per_queue_stats {
4851 struct regpair ucast_no_buff_bytes;
4852 struct regpair mcast_no_buff_bytes;
4853 struct regpair bcast_no_buff_bytes;
4854 __le32 ucast_no_buff_pkts;
4855 __le32 mcast_no_buff_pkts;
4856 __le32 bcast_no_buff_pkts;
4857 __le32 coalesced_pkts;
4858 struct regpair coalesced_bytes;
4859 __le32 coalesced_events;
4860 __le32 coalesced_aborts;
4861};
4862
4863/*
4864 * Protocol-common statistics collected by the Xstorm (per client)
4865 */
4866struct xstorm_per_queue_stats {
4867 struct regpair ucast_bytes_sent;
4868 struct regpair mcast_bytes_sent;
4869 struct regpair bcast_bytes_sent;
4870 __le32 ucast_pkts_sent;
4871 __le32 mcast_pkts_sent;
4872 __le32 bcast_pkts_sent;
4873 __le32 error_drop_pkts;
4874};
4875
4876/*
4877 * per-queue statistics
4878 */
4879struct per_queue_stats {
4880 struct tstorm_per_queue_stats tstorm_queue_statistics;
4881 struct ustorm_per_queue_stats ustorm_queue_statistics;
4882 struct xstorm_per_queue_stats xstorm_queue_statistics;
4883};
4884
4885
4886/*
4887 * FW version stored in first line of pram
4888 */
4889struct pram_fw_version {
4890 u8 major;
4891 u8 minor;
4892 u8 revision;
4893 u8 engineering;
4894 u8 flags;
4895#define PRAM_FW_VERSION_OPTIMIZED (0x1<<0)
4896#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0
4897#define PRAM_FW_VERSION_STORM_ID (0x3<<1)
4898#define PRAM_FW_VERSION_STORM_ID_SHIFT 1
4899#define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3)
4900#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3
4901#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4)
4902#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4
4903#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6)
4904#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6
4905};
4906
4907
4908/*
4909 * specific data for a protocol-common slow path element
4910 */
4911union protocol_common_specific_data {
4912 u8 protocol_data[8];
4913 struct regpair phy_address;
4914 struct regpair mac_config_addr;
4915};
4916
4917/*
4918 * The send queue element
4919 */
4920struct protocol_common_spe {
4921 struct spe_hdr hdr;
4922 union protocol_common_specific_data data;
4923};
4924
4925
4926/*
4927 * a single rate shaping counter; can be used as a protocol or vnic counter
4928 */
4929struct rate_shaping_counter {
4930 u32 quota;
4931#if defined(__BIG_ENDIAN)
4932 u16 __reserved0;
4933 u16 rate;
4934#elif defined(__LITTLE_ENDIAN)
4935 u16 rate;
4936 u16 __reserved0;
4937#endif
4938};
4939
4940
4941/*
4942 * per-vnic rate shaping variables
4943 */
4944struct rate_shaping_vars_per_vn {
4945 struct rate_shaping_counter vn_counter;
4946};
4947
4948
4949/*
4950 * The send queue element
4951 */
4952struct slow_path_element {
4953 struct spe_hdr hdr;
4954 struct regpair protocol_data;
4955};
4956
4957
4958/*
4959 * Protocol-common statistics counter
4960 */
4961struct stats_counter {
4962 __le16 xstats_counter;
4963 __le16 reserved0;
4964 __le32 reserved1;
4965 __le16 tstats_counter;
4966 __le16 reserved2;
4967 __le32 reserved3;
4968 __le16 ustats_counter;
4969 __le16 reserved4;
4970 __le32 reserved5;
4971 __le16 cstats_counter;
4972 __le16 reserved6;
4973 __le32 reserved7;
4974};
4975
4976
4977/*
4978 * a single statistics query entry
4979 */
4980struct stats_query_entry {
4981 u8 kind;
4982 u8 index;
4983 __le16 funcID;
4984 __le32 reserved;
4985 struct regpair address;
4986};
4987
4988/*
4989 * statistic command
4990 */
4991struct stats_query_cmd_group {
4992 struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
4993};
4994
4995
4996/*
4997 * statistic command header
4998 */
4999struct stats_query_header {
5000 u8 cmd_num;
5001 u8 reserved0;
5002 __le16 drv_stats_counter;
5003 __le32 reserved1;
5004 struct regpair stats_counters_addrs;
5005};
5006
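A sketch of filling the header for a statistics query (counter, num_cmds and addr are hypothetical; U64_LO/U64_HI are the driver's usual helpers for splitting a DMA address):

	hdr->cmd_num = num_cmds;
	hdr->drv_stats_counter = cpu_to_le16(counter);
	hdr->stats_counters_addrs.lo = cpu_to_le32(U64_LO(addr));
	hdr->stats_counters_addrs.hi = cpu_to_le32(U64_HI(addr));
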
5007
5008/*
5009 * Types of statistics query entry
5010 */
5011enum stats_query_type {
5012 STATS_TYPE_QUEUE,
5013 STATS_TYPE_PORT,
5014 STATS_TYPE_PF,
5015 STATS_TYPE_TOE,
5016 STATS_TYPE_FCOE,
5017 MAX_STATS_QUERY_TYPE
5018};
5019
5020
5021/*
5022 * Indicates the function status block state
5023 */
5024enum status_block_state {
5025 SB_DISABLED,
5026 SB_ENABLED,
5027 SB_CLEANED,
5028 MAX_STATUS_BLOCK_STATE
5029};
5030
5031
5032/*
5033 * Storm IDs (including attentions for IGU related enums)
5034 */
5035enum storm_id {
5036 USTORM_ID,
5037 CSTORM_ID,
5038 XSTORM_ID,
5039 TSTORM_ID,
5040 ATTENTION_ID,
5041 MAX_STORM_ID
5042};
5043
5044
5045/*
5046 * Traffic types used in ETS and flow control algorithms
5047 */
5048enum traffic_type {
5049 LLFC_TRAFFIC_TYPE_NW,
5050 LLFC_TRAFFIC_TYPE_FCOE,
5051 LLFC_TRAFFIC_TYPE_ISCSI,
5052 MAX_TRAFFIC_TYPE
5053};
5054
5055
5056/*
5057 * zone A per-queue data
5058 */
5059struct tstorm_queue_zone_data {
5060 struct regpair reserved[4];
5061};
5062
5063
5064/*
5065 * zone B per-VF data
5066 */
5067struct tstorm_vf_zone_data {
5068 struct regpair reserved;
5069};
5070
5071
5072/*
5073 * zone A per-queue data
5074 */
5075struct ustorm_queue_zone_data {
5076 struct ustorm_eth_rx_producers eth_rx_producers;
5077 struct regpair reserved[3];
5078};
5079
5080
5081/*
5082 * zone B per-VF data
5083 */
5084struct ustorm_vf_zone_data {
5085 struct regpair reserved;
5086};
5087
5088
5089/*
5090 * data per VF-PF channel
5091 */
5092struct vf_pf_channel_data {
5093#if defined(__BIG_ENDIAN)
5094 u16 reserved0;
5095 u8 valid;
5096 u8 state;
5097#elif defined(__LITTLE_ENDIAN)
5098 u8 state;
5099 u8 valid;
5100 u16 reserved0;
5101#endif
5102 u32 reserved1;
5103};
5104
5105
5106/*
5107 * State of VF-PF channel
5108 */
5109enum vf_pf_channel_state {
5110 VF_PF_CHANNEL_STATE_READY,
5111 VF_PF_CHANNEL_STATE_WAITING_FOR_ACK,
5112 MAX_VF_PF_CHANNEL_STATE
5113};
5114
5115
5116/*
5117 * zone A per-queue data
5118 */
5119struct xstorm_queue_zone_data {
5120 struct regpair reserved[4];
5121};
5122
5123
5124/*
5125 * zone B per-VF data
5126 */
5127struct xstorm_vf_zone_data {
5128 struct regpair reserved;
5129};
5130
5131#endif /* BNX2X_HSI_H */
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
new file mode 100644
index 00000000000..4d748e77d1a
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -0,0 +1,567 @@
1/* bnx2x_init.h: Broadcom Everest network driver.
2 * Structures and macros needed during initialization.
3 *
4 * Copyright (c) 2007-2011 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
11 * Written by: Eliezer Tamir
12 * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
13 */
14
15#ifndef BNX2X_INIT_H
16#define BNX2X_INIT_H
17
18/* Init operation types and structures */
19enum {
20 OP_RD = 0x1, /* read a single register */
21 OP_WR, /* write a single register */
22 OP_SW, /* copy a string to the device */
23 OP_ZR, /* clear memory */
24 OP_ZP, /* unzip then copy with DMAE */
25 OP_WR_64, /* write 64 bit pattern */
26 OP_WB, /* copy a string using DMAE */
27 OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */
28 /* Skip the following ops if all of the init modes don't match */
29 OP_IF_MODE_OR,
30 /* Skip the following ops if any of the init modes don't match */
31 OP_IF_MODE_AND,
32 OP_MAX
33};
34
35enum {
36 STAGE_START,
37 STAGE_END,
38};
39
40/* Returns the index of start or end of a specific block stage in ops array */
41#define BLOCK_OPS_IDX(block, stage, end) \
42 (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
43
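For example, with the phases and blocks enumerated below, the slot marking the start of the NIG common-phase ops is 2*(BLOCK_NIG*NUM_OF_INIT_PHASES + PHASE_COMMON) + STAGE_START:

	u16 idx = BLOCK_OPS_IDX(BLOCK_NIG, PHASE_COMMON, STAGE_START);
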
44
45/* structs for the various opcodes */
46struct raw_op {
47 u32 op:8;
48 u32 offset:24;
49 u32 raw_data;
50};
51
52struct op_read {
53 u32 op:8;
54 u32 offset:24;
55 u32 val;
56};
57
58struct op_write {
59 u32 op:8;
60 u32 offset:24;
61 u32 val;
62};
63
64struct op_arr_write {
65 u32 op:8;
66 u32 offset:24;
67#ifdef __BIG_ENDIAN
68 u16 data_len;
69 u16 data_off;
70#else /* __LITTLE_ENDIAN */
71 u16 data_off;
72 u16 data_len;
73#endif
74};
75
76struct op_zero {
77 u32 op:8;
78 u32 offset:24;
79 u32 len;
80};
81
82struct op_if_mode {
83 u32 op:8;
84 u32 cmd_offset:24;
85 u32 mode_bit_map;
86};
87
88
89union init_op {
90 struct op_read read;
91 struct op_write write;
92 struct op_arr_write arr_wr;
93 struct op_zero zero;
94 struct raw_op raw;
95 struct op_if_mode if_mode;
96};
97
98
99/* Init Phases */
100enum {
101 PHASE_COMMON,
102 PHASE_PORT0,
103 PHASE_PORT1,
104 PHASE_PF0,
105 PHASE_PF1,
106 PHASE_PF2,
107 PHASE_PF3,
108 PHASE_PF4,
109 PHASE_PF5,
110 PHASE_PF6,
111 PHASE_PF7,
112 NUM_OF_INIT_PHASES
113};
114
115/* Init Modes */
116enum {
117 MODE_ASIC = 0x00000001,
118 MODE_FPGA = 0x00000002,
119 MODE_EMUL = 0x00000004,
120 MODE_E2 = 0x00000008,
121 MODE_E3 = 0x00000010,
122 MODE_PORT2 = 0x00000020,
123 MODE_PORT4 = 0x00000040,
124 MODE_SF = 0x00000080,
125 MODE_MF = 0x00000100,
126 MODE_MF_SD = 0x00000200,
127 MODE_MF_SI = 0x00000400,
128 MODE_MF_NIV = 0x00000800,
129 MODE_E3_A0 = 0x00001000,
130 MODE_E3_B0 = 0x00002000,
131 MODE_COS3 = 0x00004000,
132 MODE_COS6 = 0x00008000,
133 MODE_LITTLE_ENDIAN = 0x00010000,
134 MODE_BIG_ENDIAN = 0x00020000,
135};
136
137/* Init Blocks */
138enum {
139 BLOCK_ATC,
140 BLOCK_BRB1,
141 BLOCK_CCM,
142 BLOCK_CDU,
143 BLOCK_CFC,
144 BLOCK_CSDM,
145 BLOCK_CSEM,
146 BLOCK_DBG,
147 BLOCK_DMAE,
148 BLOCK_DORQ,
149 BLOCK_HC,
150 BLOCK_IGU,
151 BLOCK_MISC,
152 BLOCK_NIG,
153 BLOCK_PBF,
154 BLOCK_PGLUE_B,
155 BLOCK_PRS,
156 BLOCK_PXP2,
157 BLOCK_PXP,
158 BLOCK_QM,
159 BLOCK_SRC,
160 BLOCK_TCM,
161 BLOCK_TM,
162 BLOCK_TSDM,
163 BLOCK_TSEM,
164 BLOCK_UCM,
165 BLOCK_UPB,
166 BLOCK_USDM,
167 BLOCK_USEM,
168 BLOCK_XCM,
169 BLOCK_XPB,
170 BLOCK_XSDM,
171 BLOCK_XSEM,
172 BLOCK_MISC_AEU,
173 NUM_OF_INIT_BLOCKS
174};
175
176/* QM queue numbers */
177#define BNX2X_ETH_Q 0
178#define BNX2X_TOE_Q 3
179#define BNX2X_TOE_ACK_Q 6
180#define BNX2X_ISCSI_Q 9
181#define BNX2X_ISCSI_ACK_Q 11
182#define BNX2X_FCOE_Q 10
183
184/* Vnics per mode */
185#define BNX2X_PORT2_MODE_NUM_VNICS 4
186#define BNX2X_PORT4_MODE_NUM_VNICS 2
187
188/* COS offset for port1 in E3 B0 4port mode */
189#define BNX2X_E3B0_PORT1_COS_OFFSET 3
190
191/* QM Register addresses */
192#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\
193 (QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
194#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\
195 (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
196#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\
197 (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
198
199/* extracts the QM queue number for the specified port and vnic */
200#define BNX2X_PF_Q_NUM(q_num, port, vnic)\
201 ((((port) << 1) | (vnic)) * 16 + (q_num))
202
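A worked example with arbitrary values: queue 3 of vnic 1 on port 1 yields ((1 << 1) | 1) * 16 + 3 = 51:

	u32 pf_q_num = BNX2X_PF_Q_NUM(3, 1, 1);	/* = 51 */
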
203
204/* Maps the specified queue to the specified COS */
205static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
206{
207 /* find current COS mapping */
208 u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4);
209
210 /* check if queue->COS mapping has changed */
211 if (curr_cos != new_cos) {
212 u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS;
213 u32 reg_addr, reg_bit_map, vnic;
214
215 /* update parameters for 4port mode */
216 if (INIT_MODE_FLAGS(bp) & MODE_PORT4) {
217 num_vnics = BNX2X_PORT4_MODE_NUM_VNICS;
218 if (BP_PORT(bp)) {
219 curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
220 new_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
221 }
222 }
223
224 /* change queue mapping for each VNIC */
225 for (vnic = 0; vnic < num_vnics; vnic++) {
226 u32 pf_q_num =
227 BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic);
228 u32 q_bit_map = 1 << (pf_q_num & 0x1f);
229
230 /* overwrite queue->VOQ mapping */
231 REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos);
232
233 /* clear queue bit from current COS bit map */
234 reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
235 reg_bit_map = REG_RD(bp, reg_addr);
236 REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map));
237
238 /* set queue bit in new COS bit map */
239 reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
240 reg_bit_map = REG_RD(bp, reg_addr);
241 REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
242
243 /* set/clear queue bit in command-queue bit map
244 (E2/E3A0 only, valid COS values are 0/1) */
245 if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
246 reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
247 reg_bit_map = REG_RD(bp, reg_addr);
248 q_bit_map = 1 << (2 * (pf_q_num & 0xf));
249 reg_bit_map = new_cos ?
250 (reg_bit_map | q_bit_map) :
251 (reg_bit_map & (~q_bit_map));
252 REG_WR(bp, reg_addr, reg_bit_map);
253 }
254 }
255 }
256}
257
258/* Configures the QM according to the specified per-traffic-type COSes */
259static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
260 struct priority_cos *traffic_cos)
261{
262 bnx2x_map_q_cos(bp, BNX2X_FCOE_Q,
263 traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
264 bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q,
265 traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
266 bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
267 traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
268 if (mode != STATIC_COS) {
269 /* required only in backward compatible COS mode */
270 bnx2x_map_q_cos(bp, BNX2X_ETH_Q,
271 traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
272 bnx2x_map_q_cos(bp, BNX2X_TOE_Q,
273 traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
274 bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q,
275 traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
276 }
277}
278
279
284
285#define INITOP_SET 0 /* set the HW directly */
286#define INITOP_CLEAR 1 /* clear the HW directly */
287#define INITOP_INIT 2 /* set the init-value array */
288
289/****************************************************************************
290* ILT management
291****************************************************************************/
292struct ilt_line {
293 dma_addr_t page_mapping;
294 void *page;
295 u32 size;
296};
297
298struct ilt_client_info {
299 u32 page_size;
300 u16 start;
301 u16 end;
302 u16 client_num;
303 u16 flags;
304#define ILT_CLIENT_SKIP_INIT 0x1
305#define ILT_CLIENT_SKIP_MEM 0x2
306};
307
308struct bnx2x_ilt {
309 u32 start_line;
310 struct ilt_line *lines;
311 struct ilt_client_info clients[4];
312#define ILT_CLIENT_CDU 0
313#define ILT_CLIENT_QM 1
314#define ILT_CLIENT_SRC 2
315#define ILT_CLIENT_TM 3
316};
317
318/****************************************************************************
319* SRC configuration
320****************************************************************************/
321struct src_ent {
322 u8 opaque[56];
323 u64 next;
324};
325
326/****************************************************************************
327* Parity configuration
328****************************************************************************/
329#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
330{ \
331 block##_REG_##block##_PRTY_MASK, \
332 block##_REG_##block##_PRTY_STS_CLR, \
333 en_mask, {m1, m1h, m2, m3}, #block \
334}
335
336#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
337{ \
338 block##_REG_##block##_PRTY_MASK_0, \
339 block##_REG_##block##_PRTY_STS_CLR_0, \
340 en_mask, {m1, m1h, m2, m3}, #block"_0" \
341}
342
343#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
344{ \
345 block##_REG_##block##_PRTY_MASK_1, \
346 block##_REG_##block##_PRTY_STS_CLR_1, \
347 en_mask, {m1, m1h, m2, m3}, #block"_1" \
348}
349
350static const struct {
351 u32 mask_addr;
352 u32 sts_clr_addr;
353 u32 en_mask; /* Mask to enable parity attentions */
354 struct {
355 u32 e1; /* 57710 */
356 u32 e1h; /* 57711 */
357 u32 e2; /* 57712 */
358 u32 e3; /* 578xx */
359 } reg_mask; /* Register mask (all valid bits) */
360 char name[7]; /* Block's longest name is 6 characters long
361 * (name + suffix)
362 */
363} bnx2x_blocks_parity_data[] = {
364 /* bit 19 masked */
365 /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
366 /* bit 5,18,20-31 */
367 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
368 /* bit 5 */
369 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
370 /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
371 /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
372
373 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
374 * want to handle "system kill" flow at the moment.
375 */
376 BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff,
377 0x7ffffff),
378 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
379 0xffffffff),
380 BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff),
381 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0),
382 BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0),
383 BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0, 0xffffffff, 0xffffffff),
384 BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0, 0xff, 0xffff),
385 BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff),
386 BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1),
387 BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff),
388 BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f),
389 BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3),
390 BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3),
391 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
392 GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
393 {0xf, 0xf, 0xf, 0xf}, "UPB"},
394 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
395 GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
396 {0xf, 0xf, 0xf, 0xf}, "XPB"},
397 BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7),
398 BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f),
399 BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f),
400 BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1),
401 BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf),
402 BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf),
403 BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff),
404 BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff),
405 BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f),
406 BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
407 BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
408 BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
409 BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
410 BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
411 BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
412 BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
413 BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
414 BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
415 0xffffffff),
416 BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
417 BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
418 0xffffffff),
419 BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
420 BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
421 0xffffffff),
422 BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
423 BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
424 0xffffffff),
425 BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
426};
427
428
429/* [28] MCP Latched rom_parity
430 * [29] MCP Latched ump_rx_parity
431 * [30] MCP Latched ump_tx_parity
432 * [31] MCP Latched scpad_parity
433 */
434#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
435 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
436 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
437 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
438 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
439
440/* Below registers control the MCP parity attention output. When
441 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
442 * enabled, when cleared - disabled.
443 */
444static const u32 mcp_attn_ctl_regs[] = {
445 MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
446 MISC_REG_AEU_ENABLE4_NIG_0,
447 MISC_REG_AEU_ENABLE4_PXP_0,
448 MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
449 MISC_REG_AEU_ENABLE4_NIG_1,
450 MISC_REG_AEU_ENABLE4_PXP_1
451};
452
453static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
454{
455 int i;
456 u32 reg_val;
457
458 for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
459 reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
460
461 if (enable)
462 reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
463 else
464 reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
465
466 REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
467 }
468}
469
470static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
471{
472 if (CHIP_IS_E1(bp))
473 return bnx2x_blocks_parity_data[idx].reg_mask.e1;
474 else if (CHIP_IS_E1H(bp))
475 return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
476 else if (CHIP_IS_E2(bp))
477 return bnx2x_blocks_parity_data[idx].reg_mask.e2;
478 else /* CHIP_IS_E3 */
479 return bnx2x_blocks_parity_data[idx].reg_mask.e3;
480}
481
482static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
483{
484 int i;
485
486 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
487 u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
488
489 if (dis_mask) {
490 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
491 dis_mask);
492 DP(NETIF_MSG_HW, "Setting parity mask "
493 "for %s to\t\t0x%x\n",
494 bnx2x_blocks_parity_data[i].name, dis_mask);
495 }
496 }
497
498 /* Disable MCP parity attentions */
499 bnx2x_set_mcp_parity(bp, false);
500}
501
502/**
503 * Clear the parity error status registers.
504 */
505static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
506{
507 int i;
508 u32 reg_val, mcp_aeu_bits =
509 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
510 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
511 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
512 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
513
514 /* Clear SEM_FAST parities */
515 REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
516 REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
517 REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
518 REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
519
520 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
521 u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
522
523 if (reg_mask) {
524 reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
525 sts_clr_addr);
526 if (reg_val & reg_mask)
527 DP(NETIF_MSG_HW,
528 "Parity errors in %s: 0x%x\n",
529 bnx2x_blocks_parity_data[i].name,
530 reg_val & reg_mask);
531 }
532 }
533
534 /* Check if there were parity attentions in MCP */
535 reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
536 if (reg_val & mcp_aeu_bits)
537 DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
538 reg_val & mcp_aeu_bits);
539
540 /* Clear parity attentions in MCP:
541 * [7] clears Latched rom_parity
542 * [8] clears Latched ump_rx_parity
543 * [9] clears Latched ump_tx_parity
544 * [10] clears Latched scpad_parity (both ports)
545 */
546 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
547}
548
549static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
550{
551 int i;
552
553 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
554 u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
555
556 if (reg_mask)
557 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
558 bnx2x_blocks_parity_data[i].en_mask & reg_mask);
559 }
560
561 /* Enable MCP parity attentions */
562 bnx2x_set_mcp_parity(bp, true);
563}
564
565
566#endif /* BNX2X_INIT_H */
567
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
new file mode 100644
index 00000000000..7ec1724753a
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -0,0 +1,912 @@
1/* bnx2x_init_ops.h: Broadcom Everest network driver.
2 * Static functions needed during the initialization.
3 * This file is "included" in bnx2x_main.c.
4 *
5 * Copyright (c) 2007-2011 Broadcom Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
12 * Written by: Vladislav Zolotarov <vladz@broadcom.com>
13 */
14
15#ifndef BNX2X_INIT_OPS_H
16#define BNX2X_INIT_OPS_H
17
18
19#ifndef BP_ILT
20#define BP_ILT(bp) NULL
21#endif
22
23#ifndef BP_FUNC
24#define BP_FUNC(bp) 0
25#endif
26
27#ifndef BP_PORT
28#define BP_PORT(bp) 0
29#endif
30
31#ifndef BNX2X_ILT_FREE
32#define BNX2X_ILT_FREE(x, y, sz)
33#endif
34
35#ifndef BNX2X_ILT_ZALLOC
36#define BNX2X_ILT_ZALLOC(x, y, sz)
37#endif
38
39#ifndef ILOG2
40#define ILOG2(x) x
41#endif
42
43static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
44static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
45static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
46 dma_addr_t phys_addr, u32 addr,
47 u32 len);
48
49static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr,
50 const u32 *data, u32 len)
51{
52 u32 i;
53
54 for (i = 0; i < len; i++)
55 REG_WR(bp, addr + i*4, data[i]);
56}
57
58static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr,
59 const u32 *data, u32 len)
60{
61 u32 i;
62
63 for (i = 0; i < len; i++)
64 bnx2x_reg_wr_ind(bp, addr + i*4, data[i]);
65}
66
67static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
68 u8 wb)
69{
70 if (bp->dmae_ready)
71 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
72 else if (wb)
73 /*
74 * Wide bus registers with no dmae need to be written
75 * using indirect write.
76 */
77 bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
78 else
79 bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
80}
81
82static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill,
83 u32 len, u8 wb)
84{
85 u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
86 u32 buf_len32 = buf_len/4;
87 u32 i;
88
89 memset(GUNZIP_BUF(bp), (u8)fill, buf_len);
90
91 for (i = 0; i < len; i += buf_len32) {
92 u32 cur_len = min(buf_len32, len - i);
93
94 bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb);
95 }
96}
97
98static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
99{
100 if (bp->dmae_ready)
101 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
102 else
103 bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
104}
105
106static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
107 const u32 *data, u32 len64)
108{
109 u32 buf_len32 = FW_BUF_SIZE/4;
110 u32 len = len64*2;
111 u64 data64 = 0;
112 u32 i;
113
114 /* 64 bit value is in a blob: first low DWORD, then high DWORD */
115 data64 = HILO_U64((*(data + 1)), (*data));
116
117 len64 = min((u32)(FW_BUF_SIZE/8), len64);
118 for (i = 0; i < len64; i++) {
119 u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i;
120
121 *pdata = data64;
122 }
123
124 for (i = 0; i < len; i += buf_len32) {
125 u32 cur_len = min(buf_len32, len - i);
126
127 bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len);
128 }
129}
130
131/*********************************************************
132 There are different blobs for each PRAM section.
133 In addition, each blob write operation is divided into a few operations
134 in order to decrease the amount of phys. contiguous buffer needed.
135 Thus, when we select a blob the address may be with some offset
136 from the beginning of PRAM section.
137 The same holds for the INT_TABLE sections.
138**********************************************************/
139#define IF_IS_INT_TABLE_ADDR(base, addr) \
140 if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
141
142#define IF_IS_PRAM_ADDR(base, addr) \
143 if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
144
145static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr,
146 const u8 *data)
147{
148 IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
149 data = INIT_TSEM_INT_TABLE_DATA(bp);
150 else
151 IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
152 data = INIT_CSEM_INT_TABLE_DATA(bp);
153 else
154 IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
155 data = INIT_USEM_INT_TABLE_DATA(bp);
156 else
157 IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
158 data = INIT_XSEM_INT_TABLE_DATA(bp);
159 else
160 IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
161 data = INIT_TSEM_PRAM_DATA(bp);
162 else
163 IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
164 data = INIT_CSEM_PRAM_DATA(bp);
165 else
166 IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
167 data = INIT_USEM_PRAM_DATA(bp);
168 else
169 IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
170 data = INIT_XSEM_PRAM_DATA(bp);
171
172 return data;
173}
174
175static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
176 const u32 *data, u32 len)
177{
178 if (bp->dmae_ready)
179 VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
180 else
181 bnx2x_init_ind_wr(bp, addr, data, len);
182}
183
184static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
185 u32 val_hi)
186{
187 u32 wb_write[2];
188
189 wb_write[0] = val_lo;
190 wb_write[1] = val_hi;
191 REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
192}
193static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
194 u32 blob_off)
195{
196 const u8 *data = NULL;
197 int rc;
198 u32 i;
199
200 data = bnx2x_sel_blob(bp, addr, data) + blob_off*4;
201
202 rc = bnx2x_gunzip(bp, data, len);
203 if (rc)
204 return;
205
206 /* gunzip_outlen is in dwords */
207 len = GUNZIP_OUTLEN(bp);
208 for (i = 0; i < len; i++)
209 ((u32 *)GUNZIP_BUF(bp))[i] =
210 cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
211
212 bnx2x_write_big_buf_wb(bp, addr, len);
213}
214
215static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
216{
217 u16 op_start =
218 INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
219 STAGE_START)];
220 u16 op_end =
221 INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
222 STAGE_END)];
223 union init_op *op;
224 u32 op_idx, op_type, addr, len;
225 const u32 *data, *data_base;
226
227 /* If empty block */
228 if (op_start == op_end)
229 return;
230
231 data_base = INIT_DATA(bp);
232
233 for (op_idx = op_start; op_idx < op_end; op_idx++) {
234
235 op = (union init_op *)&(INIT_OPS(bp)[op_idx]);
236 /* Get generic data */
237 op_type = op->raw.op;
238 addr = op->raw.offset;
239		/* Get data that's used for OP_SW, OP_WB, OP_ZP and
240		 * OP_WR_64 (we assume that op_arr_write and op_write have the
241 * same structure).
242 */
243 len = op->arr_wr.data_len;
244 data = data_base + op->arr_wr.data_off;
245
246 switch (op_type) {
247 case OP_RD:
248 REG_RD(bp, addr);
249 break;
250 case OP_WR:
251 REG_WR(bp, addr, op->write.val);
252 break;
253 case OP_SW:
254 bnx2x_init_str_wr(bp, addr, data, len);
255 break;
256 case OP_WB:
257 bnx2x_init_wr_wb(bp, addr, data, len);
258 break;
259 case OP_ZR:
260 bnx2x_init_fill(bp, addr, 0, op->zero.len, 0);
261 break;
262 case OP_WB_ZR:
263 bnx2x_init_fill(bp, addr, 0, op->zero.len, 1);
264 break;
265 case OP_ZP:
266 bnx2x_init_wr_zp(bp, addr, len,
267 op->arr_wr.data_off);
268 break;
269 case OP_WR_64:
270 bnx2x_init_wr_64(bp, addr, data, len);
271 break;
272 case OP_IF_MODE_AND:
273 /* if any of the flags doesn't match, skip the
274 * conditional block.
275 */
276 if ((INIT_MODE_FLAGS(bp) &
277 op->if_mode.mode_bit_map) !=
278 op->if_mode.mode_bit_map)
279 op_idx += op->if_mode.cmd_offset;
280 break;
281 case OP_IF_MODE_OR:
282		/* if none of the flags match, skip the conditional
283 * block.
284 */
285 if ((INIT_MODE_FLAGS(bp) &
286 op->if_mode.mode_bit_map) == 0)
287 op_idx += op->if_mode.cmd_offset;
288 break;
289 default:
290 /* Should never get here! */
291
292 break;
293 }
294 }
295}
296
297
298/****************************************************************************
299* PXP Arbiter
300****************************************************************************/
301/*
302 * This code configures the PCI read/write arbiter
303 * which implements a weighted round robin
304 * between the virtual queues in the chip.
305 *
306 * The values were derived for each PCI max payload and max request size.
307 * Since max payload and max request size are only known at run time,
308 * this is done as a separate init stage.
309 */
310
311#define NUM_WR_Q 13
312#define NUM_RD_Q 29
313#define MAX_RD_ORD 3
314#define MAX_WR_ORD 2
315
316/* configuration for one arbiter queue */
317struct arb_line {
318 int l;
319 int add;
320 int ubound;
321};
322
323/* derived configuration for each read queue for each max request size */
324static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
325/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
326 { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
327 { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
328 { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
329 { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
330 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
331 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
332 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
333 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
334/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
335 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
336 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
337 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
338 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
339 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
340 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
341 { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
342 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
343 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
344/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
345 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
346 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
347 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
348 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
349 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
350 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
351 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
352 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
353 { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
354};
355
356/* derived configuration for each write queue for each max request size */
357static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
358/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
359 { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
360 { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
361 { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
362 { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
363 { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
364 { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
365 { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
366 { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
367/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
368 { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
369 { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
370 { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
371};
372
373/* register addresses for read queues */
374static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
375/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
376 PXP2_REG_RQ_BW_RD_UBOUND0},
377 {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
378 PXP2_REG_PSWRQ_BW_UB1},
379 {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
380 PXP2_REG_PSWRQ_BW_UB2},
381 {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
382 PXP2_REG_PSWRQ_BW_UB3},
383 {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
384 PXP2_REG_RQ_BW_RD_UBOUND4},
385 {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
386 PXP2_REG_RQ_BW_RD_UBOUND5},
387 {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
388 PXP2_REG_PSWRQ_BW_UB6},
389 {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
390 PXP2_REG_PSWRQ_BW_UB7},
391 {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
392 PXP2_REG_PSWRQ_BW_UB8},
393/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
394 PXP2_REG_PSWRQ_BW_UB9},
395 {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
396 PXP2_REG_PSWRQ_BW_UB10},
397 {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
398 PXP2_REG_PSWRQ_BW_UB11},
399 {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
400 PXP2_REG_RQ_BW_RD_UBOUND12},
401 {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
402 PXP2_REG_RQ_BW_RD_UBOUND13},
403 {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
404 PXP2_REG_RQ_BW_RD_UBOUND14},
405 {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
406 PXP2_REG_RQ_BW_RD_UBOUND15},
407 {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
408 PXP2_REG_RQ_BW_RD_UBOUND16},
409 {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
410 PXP2_REG_RQ_BW_RD_UBOUND17},
411 {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
412 PXP2_REG_RQ_BW_RD_UBOUND18},
413/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
414 PXP2_REG_RQ_BW_RD_UBOUND19},
415 {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
416 PXP2_REG_RQ_BW_RD_UBOUND20},
417 {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
418 PXP2_REG_RQ_BW_RD_UBOUND22},
419 {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
420 PXP2_REG_RQ_BW_RD_UBOUND23},
421 {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
422 PXP2_REG_RQ_BW_RD_UBOUND24},
423 {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
424 PXP2_REG_RQ_BW_RD_UBOUND25},
425 {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
426 PXP2_REG_RQ_BW_RD_UBOUND26},
427 {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
428 PXP2_REG_RQ_BW_RD_UBOUND27},
429 {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
430 PXP2_REG_PSWRQ_BW_UB28}
431};
432
433/* register addresses for write queues */
434static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
435/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
436 PXP2_REG_PSWRQ_BW_UB1},
437 {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
438 PXP2_REG_PSWRQ_BW_UB2},
439 {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
440 PXP2_REG_PSWRQ_BW_UB3},
441 {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
442 PXP2_REG_PSWRQ_BW_UB6},
443 {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
444 PXP2_REG_PSWRQ_BW_UB7},
445 {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
446 PXP2_REG_PSWRQ_BW_UB8},
447 {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
448 PXP2_REG_PSWRQ_BW_UB9},
449 {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
450 PXP2_REG_PSWRQ_BW_UB10},
451 {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
452 PXP2_REG_PSWRQ_BW_UB11},
453/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
454 PXP2_REG_PSWRQ_BW_UB28},
455 {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
456 PXP2_REG_RQ_BW_WR_UBOUND29},
457 {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
458 PXP2_REG_RQ_BW_WR_UBOUND30}
459};
460
461static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order,
462 int w_order)
463{
464 u32 val, i;
465
466 if (r_order > MAX_RD_ORD) {
467		DP(NETIF_MSG_HW, "read order of %d adjusted to %d\n",
468 r_order, MAX_RD_ORD);
469 r_order = MAX_RD_ORD;
470 }
471 if (w_order > MAX_WR_ORD) {
472		DP(NETIF_MSG_HW, "write order of %d adjusted to %d\n",
473 w_order, MAX_WR_ORD);
474 w_order = MAX_WR_ORD;
475 }
476 if (CHIP_REV_IS_FPGA(bp)) {
477 DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
478 w_order = 0;
479 }
480 DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);
481
482 for (i = 0; i < NUM_RD_Q-1; i++) {
483 REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
484 REG_WR(bp, read_arb_addr[i].add,
485 read_arb_data[i][r_order].add);
486 REG_WR(bp, read_arb_addr[i].ubound,
487 read_arb_data[i][r_order].ubound);
488 }
489
490 for (i = 0; i < NUM_WR_Q-1; i++) {
491 if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
492 (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
493
494 REG_WR(bp, write_arb_addr[i].l,
495 write_arb_data[i][w_order].l);
496
497 REG_WR(bp, write_arb_addr[i].add,
498 write_arb_data[i][w_order].add);
499
500 REG_WR(bp, write_arb_addr[i].ubound,
501 write_arb_data[i][w_order].ubound);
502 } else {
503
504 val = REG_RD(bp, write_arb_addr[i].l);
505 REG_WR(bp, write_arb_addr[i].l,
506 val | (write_arb_data[i][w_order].l << 10));
507
508 val = REG_RD(bp, write_arb_addr[i].add);
509 REG_WR(bp, write_arb_addr[i].add,
510 val | (write_arb_data[i][w_order].add << 10));
511
512 val = REG_RD(bp, write_arb_addr[i].ubound);
513 REG_WR(bp, write_arb_addr[i].ubound,
514 val | (write_arb_data[i][w_order].ubound << 7));
515 }
516 }
517
518 val = write_arb_data[NUM_WR_Q-1][w_order].add;
519 val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
520 val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
521 REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
522
523 val = read_arb_data[NUM_RD_Q-1][r_order].add;
524 val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
525 val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
526 REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
527
528 REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
529 REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
530 REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
531 REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
532
533 if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
534 REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
535
536 if (CHIP_IS_E3(bp))
537 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
538 else if (CHIP_IS_E2(bp))
539 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
540 else
541 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
542
543 if (!CHIP_IS_E1(bp)) {
544 /* MPS w_order optimal TH presently TH
545 * 128 0 0 2
546 * 256 1 1 3
547 * >=512 2 2 3
548 */
549 /* DMAE is special */
550 if (!CHIP_IS_E1H(bp)) {
551 /* E2 can use optimal TH */
552 val = w_order;
553 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
554 } else {
555 val = ((w_order == 0) ? 2 : 3);
556 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
557 }
558
559 REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
560 REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
561 REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
562 REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
563 REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
564 REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
565 REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
566 REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
567 REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
568 REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
569 }
570
571	/* Validate number of tags supported by device */
572#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
573 val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
574 val &= 0xFF;
575 if (val <= 0x20)
576 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
577}
578
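/* Editor's sketch (not part of the driver): one way a caller could derive
 * the r_order/w_order arguments for bnx2x_init_pxp_arb() from the PCIe
 * Device Control register, assuming w_order encodes MPS = 128 << w_order
 * (as the MPS table comment above suggests) and r_order encodes
 * MRRS = 128 << r_order. The helper name is hypothetical; the field masks
 * are the standard ones from <linux/pci_regs.h>.
 */
static inline void example_derive_pxp_orders(u16 devctl,
					     int *r_order, int *w_order)
{
	/* bits 5-7 of Device Control: Max Payload Size encoding */
	*w_order = (devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
	/* bits 12-14 of Device Control: Max Read Request Size encoding */
	*r_order = (devctl & PCI_EXP_DEVCTL_READRQ) >> 12;
	/* bnx2x_init_pxp_arb() then clamps to MAX_RD_ORD/MAX_WR_ORD */
}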
579/****************************************************************************
580* ILT management
581****************************************************************************/
582/*
583 * This code hides the low-level HW interaction for ILT management and
584 * configuration. The API consists of a shadow ILT table which is set by the
585 * driver and a set of routines to use it to configure the HW.
586 *
587 */
588
589/* ILT HW init operations */
590
591/* ILT memory management operations */
592#define ILT_MEMOP_ALLOC 0
593#define ILT_MEMOP_FREE 1
594
595/* the phys address is shifted right 12 bits and has a 1 (valid)
596 * bit added at the 53rd bit;
597 * then, since this is a wide register(TM),
598 * we split it into two 32 bit writes
599 */
600#define ILT_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
601#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
602#define ILT_RANGE(f, l) (((l) << 10) | f)
603
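/* Editor's worked example (hypothetical mapping value): for
 * page_mapping = 0x1234567000,
 *	ILT_ADDR1 -> (0x1234567000 >> 12) & 0xFFFFFFFF = 0x01234567
 *	ILT_ADDR2 -> (1 << 20) | (0x1234567000 >> 44) = 0x00100000
 * i.e. the valid bit lands at bit 20 of the high dword. Likewise
 * ILT_RANGE(100, 163) = (163 << 10) | 100 = 0x28c64.
 */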
604static int bnx2x_ilt_line_mem_op(struct bnx2x *bp,
605 struct ilt_line *line, u32 size, u8 memop)
606{
607 if (memop == ILT_MEMOP_FREE) {
608 BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
609 return 0;
610 }
611 BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
612 if (!line->page)
613 return -1;
614 line->size = size;
615 return 0;
616}
617
618
619static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
620 u8 memop)
621{
622 int i, rc;
623 struct bnx2x_ilt *ilt = BP_ILT(bp);
624 struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
625
626 if (!ilt || !ilt->lines)
627 return -1;
628
629 if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
630 return 0;
631
632 for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
633 rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
634 ilt_cli->page_size, memop);
635 }
636 return rc;
637}
638
639static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
640{
641 int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
642 if (!rc)
643 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
644 if (!rc)
645 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
646 if (!rc)
647 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
648
649 return rc;
650}
651
652static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
653 dma_addr_t page_mapping)
654{
655 u32 reg;
656
657 if (CHIP_IS_E1(bp))
658 reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
659 else
660 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
661
662 bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
663}
664
665static void bnx2x_ilt_line_init_op(struct bnx2x *bp,
666 struct bnx2x_ilt *ilt, int idx, u8 initop)
667{
668 dma_addr_t null_mapping;
669 int abs_idx = ilt->start_line + idx;
670
671
672 switch (initop) {
673 case INITOP_INIT:
674 /* set in the init-value array */
675 case INITOP_SET:
676 bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
677 break;
678 case INITOP_CLEAR:
679 null_mapping = 0;
680 bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
681 break;
682 }
683}
684
685static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
686 struct ilt_client_info *ilt_cli,
687 u32 ilt_start, u8 initop)
688{
689 u32 start_reg = 0;
690 u32 end_reg = 0;
691
692	/* The boundary is either SET or INIT;
693	   CLEAR maps to SET, and for now SET behaves like INIT */
694
695 /* find the appropriate regs */
696 if (CHIP_IS_E1(bp)) {
697 switch (ilt_cli->client_num) {
698 case ILT_CLIENT_CDU:
699 start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
700 break;
701 case ILT_CLIENT_QM:
702 start_reg = PXP2_REG_PSWRQ_QM0_L2P;
703 break;
704 case ILT_CLIENT_SRC:
705 start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
706 break;
707 case ILT_CLIENT_TM:
708 start_reg = PXP2_REG_PSWRQ_TM0_L2P;
709 break;
710 }
711 REG_WR(bp, start_reg + BP_FUNC(bp)*4,
712 ILT_RANGE((ilt_start + ilt_cli->start),
713 (ilt_start + ilt_cli->end)));
714 } else {
715 switch (ilt_cli->client_num) {
716 case ILT_CLIENT_CDU:
717 start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
718 end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
719 break;
720 case ILT_CLIENT_QM:
721 start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
722 end_reg = PXP2_REG_RQ_QM_LAST_ILT;
723 break;
724 case ILT_CLIENT_SRC:
725 start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
726 end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
727 break;
728 case ILT_CLIENT_TM:
729 start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
730 end_reg = PXP2_REG_RQ_TM_LAST_ILT;
731 break;
732 }
733 REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
734 REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
735 }
736}
737
738static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
739 struct bnx2x_ilt *ilt,
740 struct ilt_client_info *ilt_cli,
741 u8 initop)
742{
743 int i;
744
745 if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
746 return;
747
748 for (i = ilt_cli->start; i <= ilt_cli->end; i++)
749 bnx2x_ilt_line_init_op(bp, ilt, i, initop);
750
751	/* init/clear the ILT boundaries */
752 bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
753}
754
755static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
756 struct ilt_client_info *ilt_cli, u8 initop)
757{
758 struct bnx2x_ilt *ilt = BP_ILT(bp);
759
760 bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop);
761}
762
763static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
764 int cli_num, u8 initop)
765{
766 struct bnx2x_ilt *ilt = BP_ILT(bp);
767 struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
768
769 bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
770}
771
772static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
773{
774 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
775 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
776 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
777 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
778}
779
780static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
781 u32 psz_reg, u8 initop)
782{
783 struct bnx2x_ilt *ilt = BP_ILT(bp);
784 struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
785
786 if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
787 return;
788
789 switch (initop) {
790 case INITOP_INIT:
791 /* set in the init-value array */
792 case INITOP_SET:
793 REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
794 break;
795 case INITOP_CLEAR:
796 break;
797 }
798}
799
800/*
801 * Called during the init common stage; ILT clients should be
802 * initialized prior to calling this function.
803 */
804static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
805{
806 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
807 PXP2_REG_RQ_CDU_P_SIZE, initop);
808 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
809 PXP2_REG_RQ_QM_P_SIZE, initop);
810 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
811 PXP2_REG_RQ_SRC_P_SIZE, initop);
812 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
813 PXP2_REG_RQ_TM_P_SIZE, initop);
814}
815
816/****************************************************************************
817* QM initializations
818****************************************************************************/
819#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */
820#define QM_INIT_MIN_CID_COUNT 31
821#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
822
823/* called during init port stage */
824static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
825 u8 initop)
826{
827 int port = BP_PORT(bp);
828
829 if (QM_INIT(qm_cid_count)) {
830 switch (initop) {
831 case INITOP_INIT:
832 /* set in the init-value array */
833 case INITOP_SET:
834 REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
835 qm_cid_count/16 - 1);
836 break;
837 case INITOP_CLEAR:
838 break;
839 }
840 }
841}
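/* Editor's worked example (illustrative count): for qm_cid_count = 64 the
 * write above is 64/16 - 1 = 3, i.e. the number of 16-connection chunks
 * minus one; the driver is assumed to round qm_cid_count so that it
 * divides evenly by 16.
 */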
842
843static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
844{
845 int i;
846 u32 wb_data[2];
847
848 wb_data[0] = wb_data[1] = 0;
849
850 for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
851 REG_WR(bp, QM_REG_BASEADDR + i*4,
852 qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
853 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8,
854 wb_data, 2);
855
856 if (CHIP_IS_E1H(bp)) {
857 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4,
858 qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
859 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
860 wb_data, 2);
861 }
862 }
863}
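/* Editor's worked example (illustrative count): with qm_cid_count = 64 the
 * loop above gives queue i the base 64 * 4 * (i % 16), e.g. queue 5 starts
 * at 1280, so the 16 per-function queues each own an equal slice of the QM
 * context area and the pattern repeats for all 4 * QM_QUEUES_PER_FUNC
 * entries.
 */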
864
865/* called during init common stage */
866static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
867 u8 initop)
868{
869 if (!QM_INIT(qm_cid_count))
870 return;
871
872 switch (initop) {
873 case INITOP_INIT:
874 /* set in the init-value array */
875 case INITOP_SET:
876 bnx2x_qm_set_ptr_table(bp, qm_cid_count);
877 break;
878 case INITOP_CLEAR:
879 break;
880 }
881}
882
883/****************************************************************************
884* SRC initializations
885****************************************************************************/
886#ifdef BCM_CNIC
887/* called during init func stage */
888static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
889 dma_addr_t t2_mapping, int src_cid_count)
890{
891 int i;
892 int port = BP_PORT(bp);
893
894 /* Initialize T2 */
895 for (i = 0; i < src_cid_count-1; i++)
896 t2[i].next = (u64)(t2_mapping +
897 (i+1)*sizeof(struct src_ent));
898
899 /* tell the searcher where the T2 table is */
900 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
901
902 bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
903 U64_LO(t2_mapping), U64_HI(t2_mapping));
904
905 bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
906 U64_LO((u64)t2_mapping +
907 (src_cid_count-1) * sizeof(struct src_ent)),
908 U64_HI((u64)t2_mapping +
909 (src_cid_count-1) * sizeof(struct src_ent)));
910}
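/* Editor's note: t2 above forms a singly linked free list in DMA memory;
 * entry i holds the bus address of entry i+1, and the FIRSTFREE/LASTFREE
 * writes hand the searcher the head and tail of that list.
 */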
911#endif
912#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
new file mode 100644
index 00000000000..ba15bdc5a1a
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -0,0 +1,12472 @@
1/* Copyright 2008-2011 Broadcom Corporation
2 *
3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you
5 * under the terms of the GNU General Public License version 2, available
6 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
7 *
8 * Notwithstanding the above, under no circumstances may you combine this
9 * software in any way with any other Broadcom software provided under a
10 * license other than the GPL, without Broadcom's express prior written
11 * consent.
12 *
13 * Written by Yaniv Rosner
14 *
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/pci.h>
22#include <linux/netdevice.h>
23#include <linux/delay.h>
24#include <linux/ethtool.h>
25#include <linux/mutex.h>
26
27#include "bnx2x.h"
28#include "bnx2x_cmn.h"
29
30
31/********************************************************/
32#define ETH_HLEN 14
33/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
34#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
35#define ETH_MIN_PACKET_SIZE 60
36#define ETH_MAX_PACKET_SIZE 1500
37#define ETH_MAX_JUMBO_PACKET_SIZE 9600
38#define MDIO_ACCESS_TIMEOUT 1000
39#define BMAC_CONTROL_RX_ENABLE 2
40#define WC_LANE_MAX 4
41#define I2C_SWITCH_WIDTH 2
42#define I2C_BSC0 0
43#define I2C_BSC1 1
44#define I2C_WA_RETRY_CNT 3
45#define MCPR_IMC_COMMAND_READ_OP 1
46#define MCPR_IMC_COMMAND_WRITE_OP 2
47
48/***********************************************************/
49/* Shortcut definitions */
50/***********************************************************/
51
52#define NIG_LATCH_BC_ENABLE_MI_INT 0
53
54#define NIG_STATUS_EMAC0_MI_INT \
55 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT
56#define NIG_STATUS_XGXS0_LINK10G \
57 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
58#define NIG_STATUS_XGXS0_LINK_STATUS \
59 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
60#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
61 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
62#define NIG_STATUS_SERDES0_LINK_STATUS \
63 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
64#define NIG_MASK_MI_INT \
65 NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
66#define NIG_MASK_XGXS0_LINK10G \
67 NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
68#define NIG_MASK_XGXS0_LINK_STATUS \
69 NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
70#define NIG_MASK_SERDES0_LINK_STATUS \
71 NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
72
73#define MDIO_AN_CL73_OR_37_COMPLETE \
74 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
75 MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
76
77#define XGXS_RESET_BITS \
78 (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \
79 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \
80 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \
81 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
82 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
83
84#define SERDES_RESET_BITS \
85 (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
86 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \
87 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \
88 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
89
90#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
91#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
92#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
93#define AUTONEG_PARALLEL \
94 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
95#define AUTONEG_SGMII_FIBER_AUTODET \
96 SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
97#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
98
99#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
100 MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
101#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
102 MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
103#define GP_STATUS_SPEED_MASK \
104 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
105#define GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
106#define GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
107#define GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
108#define GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
109#define GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
110#define GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
111#define GP_STATUS_10G_HIG \
112 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
113#define GP_STATUS_10G_CX4 \
114 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
115#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
116#define GP_STATUS_10G_KX4 \
117 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
118#define GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR
119#define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
120#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
121#define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
122#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
123#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
124#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
125#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
126#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
127#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
128#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
129#define LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
130#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
131#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
132#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
133#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
134#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
135#define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
136#define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
137
138
139
140/* SFP EEPROM definitions */
141#define SFP_EEPROM_CON_TYPE_ADDR 0x2
142 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
143 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
144
145
146#define SFP_EEPROM_COMP_CODE_ADDR 0x3
147 #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4)
148 #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5)
149 #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6)
150
151#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
152 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
153 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
154
155#define SFP_EEPROM_OPTIONS_ADDR 0x40
156 #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
157#define SFP_EEPROM_OPTIONS_SIZE 2
158
159#define EDC_MODE_LINEAR 0x0022
160#define EDC_MODE_LIMITING 0x0044
161#define EDC_MODE_PASSIVE_DAC 0x0055
162
163
164/* BRB thresholds for E2*/
165#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170
166#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
167
168#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE 250
169#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
170
171#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE 10
172#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 90
173
174#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50
175#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250
176
177/* BRB thresholds for E3A0 */
178#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290
179#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
180
181#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE 410
182#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
183
184#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE 10
185#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 170
186
187#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50
188#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410
189
190
191/* BRB thresholds for E3B0 2 port mode*/
192#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025
193#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
194
195#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE 1025
196#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
197
198#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
199#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 1025
200
201#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE 50
202#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE 1025
203
204/* only for E3B0*/
205#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR 1025
206#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR 1025
207
208/* Lossy + Lossless GUARANTEED == GUART */
209#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART		284
210/* Lossless + Lossless */
211#define PFC_E3B0_2P_PAUSE_LB_GUART		236
212/* Lossy + Lossy */
213#define PFC_E3B0_2P_NON_PAUSE_LB_GUART		342
214
215/* Lossy + Lossless */
216#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART		284
217/* Lossless + Lossless */
218#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART		236
219/* Lossy + Lossy */
220#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART 336
221#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST 80
222
223#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART 0
224#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST 0
225
226/* BRB thresholds for E3B0 4 port mode */
227#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 304
228#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
229
230#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE 384
231#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
232
233#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
234#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 304
235
236#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50
237#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384
238
239
240/* only for E3B0*/
241#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304
242#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384
243#define PFC_E3B0_4P_LB_GUART 120
244
245#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120
246#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
247
248#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80
249#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
250
251#define DCBX_INVALID_COS (0xFF)
252
253#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
254#define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000)
255#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360)
256#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720)
257#define ETS_E3B0_PBF_MIN_W_VAL (10000)
258
259#define MAX_PACKET_SIZE (9700)
260#define WC_UC_TIMEOUT 100
261
262/**********************************************************/
263/* INTERFACE */
264/**********************************************************/
265
266#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
267 bnx2x_cl45_write(_bp, _phy, \
268 (_phy)->def_md_devad, \
269 (_bank + (_addr & 0xf)), \
270 _val)
271
272#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
273 bnx2x_cl45_read(_bp, _phy, \
274 (_phy)->def_md_devad, \
275 (_bank + (_addr & 0xf)), \
276 _val)
277
278static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
279{
280 u32 val = REG_RD(bp, reg);
281
282 val |= bits;
283 REG_WR(bp, reg, val);
284 return val;
285}
286
287static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
288{
289 u32 val = REG_RD(bp, reg);
290
291 val &= ~bits;
292 REG_WR(bp, reg, val);
293 return val;
294}
295
296/******************************************************************/
297/* EPIO/GPIO section */
298/******************************************************************/
299static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en)
300{
301 u32 epio_mask, gp_oenable;
302 *en = 0;
303 /* Sanity check */
304 if (epio_pin > 31) {
305 DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin);
306 return;
307 }
308
309 epio_mask = 1 << epio_pin;
310	/* Set this EPIO to input (clear its output-enable bit) */
311 gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
312 REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask);
313
314 *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
315}
316static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en)
317{
318 u32 epio_mask, gp_output, gp_oenable;
319
320 /* Sanity check */
321 if (epio_pin > 31) {
322 DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin);
323 return;
324 }
325 DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en);
326 epio_mask = 1 << epio_pin;
327	/* Set the output value for this EPIO */
328 gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS);
329 if (en)
330 gp_output |= epio_mask;
331 else
332 gp_output &= ~epio_mask;
333
334 REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output);
335
336	/* Set this EPIO to output (set its output-enable bit) */
337 gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
338 REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask);
339}
340
341static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val)
342{
343 if (pin_cfg == PIN_CFG_NA)
344 return;
345 if (pin_cfg >= PIN_CFG_EPIO0) {
346 bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
347 } else {
348 u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
349 u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
350 bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port);
351 }
352}
353
354static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val)
355{
356 if (pin_cfg == PIN_CFG_NA)
357 return -EINVAL;
358 if (pin_cfg >= PIN_CFG_EPIO0) {
359 bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
360 } else {
361 u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
362 u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
363 *val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
364 }
365 return 0;
366
367}
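/* Editor's worked example (hypothetical pin_cfg): for
 * pin_cfg = PIN_CFG_GPIO0_P0 + 6 the decomposition above yields
 * gpio_num = 6 & 0x3 = 2 and gpio_port = 6 >> 2 = 1, i.e. GPIO2 on port 1;
 * values at or above PIN_CFG_EPIO0 take the EPIO path instead.
 */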
368/******************************************************************/
369/* ETS section */
370/******************************************************************/
371static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
372{
373 /* ETS disabled configuration*/
374 struct bnx2x *bp = params->bp;
375
376 DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
377
378 /*
379 * mapping between entry priority to client number (0,1,2 -debug and
380 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
381 * 3bits client num.
382 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
383 * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000
384 */
385
386 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
387 /*
388 * Bitmap of 5 bits. Each bit specifies whether the entry behaves
389 * as strict. Bits 0,1,2 - debug and management entries, 3 -
390 * COS0 entry, 4 - COS1 entry.
391 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
392 * bit4 bit3 bit2 bit1 bit0
393 * MCP and debug are strict
394 */
395
396 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
397 /* defines which entries (clients) are subjected to WFQ arbitration */
398 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
399 /*
400 * For strict priority entries defines the number of consecutive
401 * slots for the highest priority.
402 */
403 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
404 /*
405 * mapping between the CREDIT_WEIGHT registers and actual client
406 * numbers
407 */
408 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
409 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
410 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
411
412 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
413 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
414 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
415 /* ETS mode disable */
416 REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
417 /*
418 * If ETS mode is enabled (there is no strict priority) defines a WFQ
419 * weight for COS0/COS1.
420 */
421 REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
422 REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
423 /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
424 REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
425 REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
426 /* Defines the number of consecutive slots for the strict priority */
427 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
428}
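/* Editor's worked example: the 0x4688 written to
 * NIG_REG_P0_TX_ARB_PRIORITY_CLIENT above decodes, 3 bits per priority
 * entry, as 100 011 010 001 000b = 0x4688, i.e. PRI4..PRI0 =
 * cos1(100), cos0(011), dbg1(010), dbg0(001), MCP(000), matching the
 * mapping comment in the function body.
 */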
429/******************************************************************************
430* Description:
431* Return min_w_val, which is set according to the line speed.
432*
433******************************************************************************/
434static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
435{
436 u32 min_w_val = 0;
437 /* Calculate min_w_val.*/
438 if (vars->link_up) {
439 if (SPEED_20000 == vars->line_speed)
440 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
441 else
442 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
443 } else
444 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
445 /**
446	 * If the link isn't up (static configuration, for example),
447	 * the 20Gbps value is used.
448 */
449 return min_w_val;
450}
451/******************************************************************************
452* Description:
453* Get the credit upper bound from min_w_val.
454*
455******************************************************************************/
456static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
457{
458 const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
459 MAX_PACKET_SIZE);
460 return credit_upper_bound;
461}
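/* Editor's worked example (illustrative weight): with the up-to-10G NIG
 * minimum weight of 1360, 150 * 1360 = 204000 > MAX_PACKET_SIZE (9700),
 * so the bound is 204000; MAX_PACKET_SIZE only dominates for very small
 * min_w_val.
 */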
462/******************************************************************************
463* Description:
464* Set credit upper bound for NIG.
465*
466******************************************************************************/
467static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
468 const struct link_params *params,
469 const u32 min_w_val)
470{
471 struct bnx2x *bp = params->bp;
472 const u8 port = params->port;
473 const u32 credit_upper_bound =
474 bnx2x_ets_get_credit_upper_bound(min_w_val);
475
476 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
477 NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
478 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
479 NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
480 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
481 NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
482 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
483 NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
484 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
485 NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
486 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
487 NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
488
489 if (0 == port) {
490 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
491 credit_upper_bound);
492 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
493 credit_upper_bound);
494 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
495 credit_upper_bound);
496 }
497}
498/******************************************************************************
499* Description:
500* Return the NIG ETS registers to their init values, except
501* credit_upper_bound, which isn't used in this configuration
502* (no WFQ is enabled) and will be configured according to the
503* spec.
504*
505******************************************************************************/
506static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
507 const struct link_vars *vars)
508{
509 struct bnx2x *bp = params->bp;
510 const u8 port = params->port;
511 const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
512 /**
513	 * Mapping between entry priority and client number (0,1,2 - debug
514	 * and management clients, 3 - COS0 client, 4 - COS1, ... 8 - COS5)
515	 * (HIGHEST); 4-bit client numbers. TODO_ETS - should be done by
516	 * reset value or init tool.
517 */
518 if (port) {
519 REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
520 REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
521 } else {
522 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
523 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
524 }
525 /**
526 * For strict priority entries defines the number of consecutive
527 * slots for the highest priority.
528 */
529 /* TODO_ETS - Should be done by reset value or init tool */
530 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
531		   NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
532 /**
533 * mapping between the CREDIT_WEIGHT registers and actual client
534 * numbers
535 */
536 /* TODO_ETS - Should be done by reset value or init tool */
537 if (port) {
538 /*Port 1 has 6 COS*/
539 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
540 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
541 } else {
542 /*Port 0 has 9 COS*/
543 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
544 0x43210876);
545 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
546 }
547
548 /**
549	 * Bitmap of 5 bits. Each bit specifies whether the entry behaves
550 * as strict. Bits 0,1,2 - debug and management entries, 3 -
551 * COS0 entry, 4 - COS1 entry.
552 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
553 * bit4 bit3 bit2 bit1 bit0
554 * MCP and debug are strict
555 */
556 if (port)
557 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
558 else
559 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
560 /* defines which entries (clients) are subjected to WFQ arbitration */
561 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
562 NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
563
564 /**
565	 * Note that the register addresses are not contiguous, so a "for"
566	 * loop here is not appropriate. In 2 port mode only COS0-5 of
567	 * port0 can be used; DEBUG0, DEBUG1 and MGMT are never used for
568	 * WFQ. In 4 port mode only COS0-2 of port1 can be used; DEBUG0,
569	 * DEBUG1 and MGMT are never used for WFQ.
570 */
571 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
572 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
573 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
574 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
575 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
576 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
577 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
578 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
579 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
580 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
581 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
582 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
583 if (0 == port) {
584 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
585 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
586 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
587 }
588
589 bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
590}
591/******************************************************************************
592* Description:
593* Set credit upper bound for PBF.
594*
595******************************************************************************/
596static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
597 const struct link_params *params,
598 const u32 min_w_val)
599{
600 struct bnx2x *bp = params->bp;
601 const u32 credit_upper_bound =
602 bnx2x_ets_get_credit_upper_bound(min_w_val);
603 const u8 port = params->port;
604 u32 base_upper_bound = 0;
605 u8 max_cos = 0;
606 u8 i = 0;
607 /**
608	 * In 2 port mode port0 has COS0-5 that can be used for WFQ. In 4
609	 * port mode port1 has COS0-2 that can be used for WFQ.
610 */
611 if (0 == port) {
612 base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
613 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
614 } else {
615 base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
616 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
617 }
618
619 for (i = 0; i < max_cos; i++)
620 REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
621}
622
623/******************************************************************************
624* Description:
625* Return the PBF ETS registers to their init values, except
626* credit_upper_bound, which isn't used in this configuration
627* (no WFQ is enabled) and will be configured according to the
628* spec.
629*
630******************************************************************************/
631static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
632{
633 struct bnx2x *bp = params->bp;
634 const u8 port = params->port;
635 const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
636 u8 i = 0;
637 u32 base_weight = 0;
638 u8 max_cos = 0;
639
640 /**
641	 * Mapping between entry priority and client number (0 - COS0
642	 * client, 1 - COS1, ... 5 - COS5) (HIGHEST); 4-bit client numbers.
643	 * TODO_ETS - should be done by reset value or init tool.
644 */
645 if (port)
646 /* 0x688 (|011|0 10|00 1|000) */
647 REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , 0x688);
648 else
649 /* (10 1|100 |011|0 10|00 1|000) */
650 REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , 0x2C688);
651
652 /* TODO_ETS - Should be done by reset value or init tool */
653 if (port)
654 /* 0x688 (|011|0 10|00 1|000)*/
655 REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
656 else
657 /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
658 REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
659
660 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
661 PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 , 0x100);
662
663
664 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
665 PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , 0);
666
667 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
668 PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0);
669 /**
670 * In 2 port mode port0 has COS0-5 that can be used for WFQ.
671 * In 4 port mode port1 has COS0-2 that can be used for WFQ.
672 */
673 if (0 == port) {
674 base_weight = PBF_REG_COS0_WEIGHT_P0;
675 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
676 } else {
677 base_weight = PBF_REG_COS0_WEIGHT_P1;
678 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
679 }
680
681 for (i = 0; i < max_cos; i++)
682 REG_WR(bp, base_weight + (0x4 * i), 0);
683
684 bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
685}
686/******************************************************************************
687* Description:
688* E3B0 disable returns the registers basically to their init values.
689*
690******************************************************************************/
691static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
692 const struct link_vars *vars)
693{
694 struct bnx2x *bp = params->bp;
695
696 if (!CHIP_IS_E3B0(bp)) {
697		DP(NETIF_MSG_LINK,
698		   "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
699 return -EINVAL;
700 }
701
702 bnx2x_ets_e3b0_nig_disabled(params, vars);
703
704 bnx2x_ets_e3b0_pbf_disabled(params);
705
706 return 0;
707}
708
709/******************************************************************************
710* Description:
711* Disable returns the registers basically to their init values.
712*
713******************************************************************************/
714int bnx2x_ets_disabled(struct link_params *params,
715 struct link_vars *vars)
716{
717 struct bnx2x *bp = params->bp;
718 int bnx2x_status = 0;
719
720 if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
721 bnx2x_ets_e2e3a0_disabled(params);
722 else if (CHIP_IS_E3B0(bp))
723 bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
724 else {
725 DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
726 return -EINVAL;
727 }
728
729 return bnx2x_status;
730}
731
732/******************************************************************************
733* Description
734* Set the COS mapping to SP and BW; until this point none of the
735* COSes is set as SP or BW.
736******************************************************************************/
737static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
738 const struct bnx2x_ets_params *ets_params,
739 const u8 cos_sp_bitmap,
740 const u8 cos_bw_bitmap)
741{
742 struct bnx2x *bp = params->bp;
743 const u8 port = params->port;
744 const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
745 const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
746 const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
747 const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
748
749 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
750 NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
751
752 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
753 PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , pbf_cli_sp_bitmap);
754
755 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
756 NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
757 nig_cli_subject2wfq_bitmap);
758
759 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
760 PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
761 pbf_cli_subject2wfq_bitmap);
762
763 return 0;
764}
765
766/******************************************************************************
767* Description:
768* This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
769* are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
770******************************************************************************/
771static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
772 const u8 cos_entry,
773 const u32 min_w_val_nig,
774 const u32 min_w_val_pbf,
775 const u16 total_bw,
776 const u8 bw,
777 const u8 port)
778{
779 u32 nig_reg_adress_crd_weight = 0;
780 u32 pbf_reg_adress_crd_weight = 0;
781 /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
782 const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
783 const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
784
785 switch (cos_entry) {
786 case 0:
787 nig_reg_adress_crd_weight =
788 (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
789 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
790 pbf_reg_adress_crd_weight = (port) ?
791 PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
792 break;
793 case 1:
794 nig_reg_adress_crd_weight = (port) ?
795 NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
796 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
797 pbf_reg_adress_crd_weight = (port) ?
798 PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
799 break;
800 case 2:
801 nig_reg_adress_crd_weight = (port) ?
802 NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
803 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
804
805 pbf_reg_adress_crd_weight = (port) ?
806 PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
807 break;
808 case 3:
809 if (port)
810 return -EINVAL;
811 nig_reg_adress_crd_weight =
812 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
813 pbf_reg_adress_crd_weight =
814 PBF_REG_COS3_WEIGHT_P0;
815 break;
816 case 4:
817 if (port)
818 return -EINVAL;
819 nig_reg_adress_crd_weight =
820 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
821 pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
822 break;
823 case 5:
824 if (port)
825 return -EINVAL;
826 nig_reg_adress_crd_weight =
827 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
828 pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
829 break;
830 }
831
832 REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
833
834 REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf);
835
836 return 0;
837}
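/* Editor's worked example (illustrative parameters): for bw = 30,
 * total_bw = 100 and min_w_val_nig = 1360, the function above writes
 * cos_bw_nig = (30 * 1360) / 100 = 408; a COS configured with bw = 0 is
 * treated as bw = 1 so it is never starved outright.
 */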
838/******************************************************************************
839* Description:
840* Calculate the total BW. A value of 0 isn't legal.
841*
842******************************************************************************/
843static int bnx2x_ets_e3b0_get_total_bw(
844 const struct link_params *params,
845 const struct bnx2x_ets_params *ets_params,
846 u16 *total_bw)
847{
848 struct bnx2x *bp = params->bp;
849 u8 cos_idx = 0;
850
851	*total_bw = 0;
852 /* Calculate total BW requested */
853 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
854 if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
855 *total_bw +=
856 ets_params->cos[cos_idx].params.bw_params.bw;
857 }
858 }
859
860 /* Check total BW is valid */
861 if ((100 != *total_bw) || (0 == *total_bw)) {
862 if (0 == *total_bw) {
863			DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW "
864			   "shouldn't be 0\n");
865			return -EINVAL;
866		}
867		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW should be "
868		   "100\n");
869		/*
870		 * We can handle the case where the BW isn't 100; this can
871		 * happen if the TCs are joined.
872		 */
873 }
874 return 0;
875}
876
877/******************************************************************************
878* Description:
879* Invalidate all the sp_pri_to_cos.
880*
881******************************************************************************/
882static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
883{
884 u8 pri = 0;
885 for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
886 sp_pri_to_cos[pri] = DCBX_INVALID_COS;
887}
888/******************************************************************************
889* Description:
890* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
891* according to sp_pri_to_cos.
892*
893******************************************************************************/
894static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
895 u8 *sp_pri_to_cos, const u8 pri,
896 const u8 cos_entry)
897{
898 struct bnx2x *bp = params->bp;
899 const u8 port = params->port;
900 const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
901 DCBX_E3B0_MAX_NUM_COS_PORT0;
902
903 if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
904		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
905		   "parameter: there can't be two COS's with "
906		   "the same strict pri\n");
907 return -EINVAL;
908 }
909
910 if (pri > max_num_of_cos) {
911		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
912		   "parameter: illegal strict priority\n");
913 return -EINVAL;
914 }
915
916 sp_pri_to_cos[pri] = cos_entry;
917 return 0;
918
919}
920
921/******************************************************************************
922* Description:
923* Returns the correct value according to COS and priority in
924* the sp_pri_cli register.
925*
926******************************************************************************/
927static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
928 const u8 pri_set,
929 const u8 pri_offset,
930 const u8 entry_size)
931{
932 u64 pri_cli_nig = 0;
933 pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
934 (pri_set + pri_offset));
935
936 return pri_cli_nig;
937}
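/* Editor's worked example: for the NIG flavor below (entry_size = 4,
 * cos_offset = 3, pri_offset = 3), cos = 0 at pri_set = 0 gives
 * (0 + 3) << (4 * (0 + 3)) = 0x3000, i.e. client 3 (COS0) in the fourth
 * 4-bit nibble, just above the fixed MCP/DBG0/DBG1 value 0x210.
 */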
938/******************************************************************************
939* Description:
940* Returns the correct value according to COS and priority in the
941* sp_pri_cli register for NIG.
942*
943******************************************************************************/
944static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
945{
946	/* MCP, Dbg0 and Dbg1 always have higher strict priority */
947 const u8 nig_cos_offset = 3;
948 const u8 nig_pri_offset = 3;
949
950 return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
951 nig_pri_offset, 4);
952
953}
954/******************************************************************************
955* Description:
956* Returns the correct value according to COS and priority in the
957* sp_pri_cli register for PBF.
958*
959******************************************************************************/
960static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
961{
962 const u8 pbf_cos_offset = 0;
963 const u8 pbf_pri_offset = 0;
964
965 return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
966 pbf_pri_offset, 3);
967
968}
969
970/******************************************************************************
971* Description:
972* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
973* according to sp_pri_to_cos (which COS has higher priority).
974*
975******************************************************************************/
976static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
977 u8 *sp_pri_to_cos)
978{
979 struct bnx2x *bp = params->bp;
980 u8 i = 0;
981 const u8 port = params->port;
982	/* MCP, Dbg0 and Dbg1 always have higher strict priority */
983 u64 pri_cli_nig = 0x210;
984 u32 pri_cli_pbf = 0x0;
985 u8 pri_set = 0;
986 u8 pri_bitmask = 0;
987 const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
988 DCBX_E3B0_MAX_NUM_COS_PORT0;
989
990 u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;
991
992 /* Set all the strict priority first */
993 for (i = 0; i < max_num_of_cos; i++) {
994 if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
995 if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
996 DP(NETIF_MSG_LINK,
997 "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
998 "invalid cos entry\n");
999 return -EINVAL;
1000 }
1001
1002 pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
1003 sp_pri_to_cos[i], pri_set);
1004
1005 pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
1006 sp_pri_to_cos[i], pri_set);
1007 pri_bitmask = 1 << sp_pri_to_cos[i];
1008			/* COS is used; remove it from the bitmap. */
1009 if (0 == (pri_bitmask & cos_bit_to_set)) {
1010 DP(NETIF_MSG_LINK,
1011 "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
1012				   "invalid: there can't be two COS's with"
1013 " the same strict pri\n");
1014 return -EINVAL;
1015 }
1016 cos_bit_to_set &= ~pri_bitmask;
1017 pri_set++;
1018 }
1019 }
1020
1021	/* Set all the non-strict priorities; i = COS */
1022 for (i = 0; i < max_num_of_cos; i++) {
1023 pri_bitmask = 1 << i;
1024 /* Check if COS was already used for SP */
1025 if (pri_bitmask & cos_bit_to_set) {
1026 /* COS wasn't used for SP */
1027 pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
1028 i, pri_set);
1029
1030 pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
1031 i, pri_set);
1032			/* COS is in use, remove it from the bitmap */
1033 cos_bit_to_set &= ~pri_bitmask;
1034 pri_set++;
1035 }
1036 }
1037
1038 if (pri_set != max_num_of_cos) {
1039 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
1040 "entries were set\n");
1041 return -EINVAL;
1042 }
1043
1044 if (port) {
1045		/* Only 6 usable clients */
1046		REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
1047		       (u32)pri_cli_nig);
1048
1049		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, pri_cli_pbf);
1050	} else {
1051		/* Only 9 usable clients */
1052		const u32 pri_cli_nig_lsb = (u32)(pri_cli_nig);
1053		const u32 pri_cli_nig_msb = (u32)((pri_cli_nig >> 32) & 0xF);
1054
1055		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
1056		       pri_cli_nig_lsb);
1057		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
1058		       pri_cli_nig_msb);
1059
1060		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, pri_cli_pbf);
1061 }
1062 return 0;
1063}
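/*
 * E.g. (illustrative) with two strict entries where COS1 outranks COS0
 * (sp_pri_to_cos = {1, 0}), the NIG value becomes
 * 0x210 | (4 << 12) | (3 << 16) = 0x34210: MCP/dbg0/dbg1 keep nibbles
 * 0-2, client 4 (COS1) takes priority entry 3 and client 3 (COS0)
 * takes priority entry 4.
 */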
1064
1065/******************************************************************************
1066* Description:
1067* Configure the COS to ETS according to BW and SP settings.
1068******************************************************************************/
1069int bnx2x_ets_e3b0_config(const struct link_params *params,
1070 const struct link_vars *vars,
1071 const struct bnx2x_ets_params *ets_params)
1072{
1073 struct bnx2x *bp = params->bp;
1074 int bnx2x_status = 0;
1075 const u8 port = params->port;
1076 u16 total_bw = 0;
1077 const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
1078 const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
1079 u8 cos_bw_bitmap = 0;
1080 u8 cos_sp_bitmap = 0;
1081 u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
1082 const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
1083 DCBX_E3B0_MAX_NUM_COS_PORT0;
1084 u8 cos_entry = 0;
1085
1086 if (!CHIP_IS_E3B0(bp)) {
1087		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config the chip isn't "
1088		   "E3B0\n");
1089 return -EINVAL;
1090 }
1091
1092	if (ets_params->num_of_cos > max_num_of_cos) {
1093 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS "
1094 "isn't supported\n");
1095 return -EINVAL;
1096 }
1097
1098	/* Prepare SP strict priority parameters */
1099	bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
1100
1101	/* Prepare BW parameters */
1102 bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
1103 &total_bw);
1104 if (0 != bnx2x_status) {
1105		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config get_total_bw "
1106		   "failed\n");
1107 return -EINVAL;
1108 }
1109
1110	/*
1111 * Upper bound is set according to current link speed (min_w_val
1112 * should be the same for upper bound and COS credit val).
1113 */
1114 bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
1115 bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
1116
1117
1118 for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
1119 if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
1120 cos_bw_bitmap |= (1 << cos_entry);
1121			/*
1122			 * The function also sets the BW in HW (not the
1123			 * mapping yet)
1124			 */
1125 bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
1126 bp, cos_entry, min_w_val_nig, min_w_val_pbf,
1127 total_bw,
1128 ets_params->cos[cos_entry].params.bw_params.bw,
1129 port);
1130 } else if (bnx2x_cos_state_strict ==
1131 ets_params->cos[cos_entry].state){
1132 cos_sp_bitmap |= (1 << cos_entry);
1133
1134 bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
1135 params,
1136 sp_pri_to_cos,
1137 ets_params->cos[cos_entry].params.sp_params.pri,
1138 cos_entry);
1139
1140 } else {
1141 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config cos state not"
1142 " valid\n");
1143 return -EINVAL;
1144 }
1145 if (0 != bnx2x_status) {
1146 DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config set cos bw "
1147 "failed\n");
1148 return bnx2x_status;
1149 }
1150 }
1151
1152 /* Set SP register (which COS has higher priority) */
1153 bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
1154 sp_pri_to_cos);
1155
1156 if (0 != bnx2x_status) {
1157 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config set_pri_cli_reg "
1158 "failed\n");
1159 return bnx2x_status;
1160 }
1161
1162 /* Set client mapping of BW and strict */
1163 bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
1164 cos_sp_bitmap,
1165 cos_bw_bitmap);
1166
1167 if (0 != bnx2x_status) {
1168 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
1169 return bnx2x_status;
1170 }
1171 return 0;
1172}
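/*
 * E.g. (illustrative) an ets_params with num_of_cos = 2, COS0 in
 * bnx2x_cos_state_bw and COS1 in bnx2x_cos_state_strict leaves
 * cos_bw_bitmap = 0x1 and cos_sp_bitmap = 0x2 for the client-mapping
 * stage above.
 */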
1173static void bnx2x_ets_bw_limit_common(const struct link_params *params)
1174{
1175	/* Common part of the ETS BW-limit configuration */
1176 struct bnx2x *bp = params->bp;
1177 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
1178 /*
1179 * defines which entries (clients) are subjected to WFQ arbitration
1180 * COS0 0x8
1181 * COS1 0x10
1182 */
1183 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
1184 /*
1185 * mapping between the ARB_CREDIT_WEIGHT registers and actual
1186 * client numbers (WEIGHT_0 does not actually have to represent
1187 * client 0)
1188 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
1189 * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
1190 */
1191 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
1192
1193 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
1194 ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
1195 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
1196 ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
1197
1198 /* ETS mode enabled*/
1199 REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
1200
1201 /* Defines the number of consecutive slots for the strict priority */
1202 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
1203 /*
1204	 * 5-bit bitmap. Each bit specifies whether the entry behaves
1205	 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
1206	 * entry, 4 - COS1 entry.
1207	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
1208 * bit4 bit3 bit2 bit1 bit0
1209 * MCP and debug are strict
1210 */
1211 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
1212
1213 /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
1214 REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
1215 ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
1216 REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
1217 ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
1218}
1219
1220void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
1221 const u32 cos1_bw)
1222{
1223	/* ETS BW-limit configuration */
1224 struct bnx2x *bp = params->bp;
1225 const u32 total_bw = cos0_bw + cos1_bw;
1226 u32 cos0_credit_weight = 0;
1227 u32 cos1_credit_weight = 0;
1228
1229 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
1230
1231 if ((0 == total_bw) ||
1232 (0 == cos0_bw) ||
1233 (0 == cos1_bw)) {
1234		DP(NETIF_MSG_LINK, "Total, COS0 and COS1 BW can't be zero\n");
1235 return;
1236 }
1237
1238 cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
1239 total_bw;
1240 cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
1241 total_bw;
1242
1243 bnx2x_ets_bw_limit_common(params);
1244
1245 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
1246 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
1247
1248 REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
1249 REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
1250}
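/*
 * E.g. (illustrative) cos0_bw = 30 and cos1_bw = 70 program credit
 * weights of (30 * ETS_BW_LIMIT_CREDIT_WEIGHT) / 100 and
 * (70 * ETS_BW_LIMIT_CREDIT_WEIGHT) / 100, preserving the 30:70 ratio
 * whatever the absolute BW values are.
 */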
1251
1252int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1253{
1254	/* ETS strict-priority configuration */
1255 struct bnx2x *bp = params->bp;
1256 u32 val = 0;
1257
1258 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
1259 /*
1260	 * 5-bit bitmap. Each bit specifies whether the entry behaves
1261	 * as strict. Bits 0,1,2 - debug and management entries,
1262	 * 3 - COS0 entry, 4 - COS1 entry.
1263	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
1264 * bit4 bit3 bit2 bit1 bit0
1265 * MCP and debug are strict
1266 */
1267 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
1268 /*
1269 * For strict priority entries defines the number of consecutive slots
1270 * for the highest priority.
1271 */
1272 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
1273 /* ETS mode disable */
1274 REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
1275 /* Defines the number of consecutive slots for the strict priority */
1276 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
1277
1278	/* Defines which COS is treated as the highest strict priority */
1279 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
1280
1281 /*
1282	 * Mapping between entry priority and client number (0,1,2 - debug
1283	 * and management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
1284	 * 3 bits per client number.
1285	 * PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
1286	 * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000  (strict_cos == 0)
1287	 * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000  (strict_cos == 1)
1288	 */
1289 val = (0 == strict_cos) ? 0x2318 : 0x22E0;
1290 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
1291
1292 return 0;
1293}
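/*
 * Decoding 0x2318 above as five 3-bit fields (PRI4..PRI0) gives
 * 010 001 100 011 000, i.e. dbg0, dbg1, cos1, cos0, MCP; the 0x22E0
 * case simply swaps the cos0/cos1 fields.
 */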
1294/******************************************************************/
1295/* PFC section */
1296/******************************************************************/
1297
1298static void bnx2x_update_pfc_xmac(struct link_params *params,
1299 struct link_vars *vars,
1300 u8 is_lb)
1301{
1302 struct bnx2x *bp = params->bp;
1303 u32 xmac_base;
1304 u32 pause_val, pfc0_val, pfc1_val;
1305
1306	/* XMAC base address */
1307 xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
1308
1309 /* Initialize pause and pfc registers */
1310 pause_val = 0x18000;
1311 pfc0_val = 0xFFFF8000;
1312 pfc1_val = 0x2;
1313
1314 /* No PFC support */
1315 if (!(params->feature_config_flags &
1316 FEATURE_CONFIG_PFC_ENABLED)) {
1317
1318 /*
1319 * RX flow control - Process pause frame in receive direction
1320 */
1321 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
1322 pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
1323
1324 /*
1325 * TX flow control - Send pause packet when buffer is full
1326 */
1327 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
1328 pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
1329 } else {/* PFC support */
1330 pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
1331 XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
1332 XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
1333 XMAC_PFC_CTRL_HI_REG_TX_PFC_EN;
1334 }
1335
1336 /* Write pause and PFC registers */
1337 REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
1338 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
1339 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
1340
1341
1342 /* Set MAC address for source TX Pause/PFC frames */
1343 REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO,
1344 ((params->mac_addr[2] << 24) |
1345 (params->mac_addr[3] << 16) |
1346 (params->mac_addr[4] << 8) |
1347 (params->mac_addr[5])));
1348 REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI,
1349 ((params->mac_addr[0] << 8) |
1350 (params->mac_addr[1])));
1351
1352 udelay(30);
1353}
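/*
 * E.g. (illustrative) mac_addr 00:10:18:aa:bb:cc is written as
 * CTRL_SA_LO = 0x18aabbcc and CTRL_SA_HI = 0x0010.
 */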
1354
1355
1356static void bnx2x_emac_get_pfc_stat(struct link_params *params,
1357 u32 pfc_frames_sent[2],
1358 u32 pfc_frames_received[2])
1359{
1360 /* Read pfc statistic */
1361 struct bnx2x *bp = params->bp;
1362 u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1363 u32 val_xon = 0;
1364 u32 val_xoff = 0;
1365
1366 DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
1367
1368 /* PFC received frames */
1369 val_xoff = REG_RD(bp, emac_base +
1370 EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
1371 val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
1372 val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
1373 val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
1374
1375 pfc_frames_received[0] = val_xon + val_xoff;
1376
1377	/* PFC sent frames */
1378 val_xoff = REG_RD(bp, emac_base +
1379 EMAC_REG_RX_PFC_STATS_XOFF_SENT);
1380 val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
1381 val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
1382 val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
1383
1384 pfc_frames_sent[0] = val_xon + val_xoff;
1385}
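/*
 * Note: the EMAC reports a single counter pair, so only entry [0] of
 * each two-entry array is filled by this helper.
 */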
1386
1387/* Read PFC statistics */
1388void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
1389 u32 pfc_frames_sent[2],
1390 u32 pfc_frames_received[2])
1391{
1392 /* Read pfc statistic */
1393 struct bnx2x *bp = params->bp;
1394
1395 DP(NETIF_MSG_LINK, "pfc statistic\n");
1396
1397 if (!vars->link_up)
1398 return;
1399
1400 if (MAC_TYPE_EMAC == vars->mac_type) {
1401 DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
1402 bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
1403 pfc_frames_received);
1404 }
1405}
1406/******************************************************************/
1407/* MAC/PBF section */
1408/******************************************************************/
1409static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
1410{
1411 u32 mode, emac_base;
1412	/*
1413 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1414 * (a value of 49==0x31) and make sure that the AUTO poll is off
1415 */
1416
1417 if (CHIP_IS_E2(bp))
1418 emac_base = GRCBASE_EMAC0;
1419 else
1420 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1421 mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1422 mode &= ~(EMAC_MDIO_MODE_AUTO_POLL |
1423 EMAC_MDIO_MODE_CLOCK_CNT);
1424 if (USES_WARPCORE(bp))
1425 mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1426 else
1427 mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1428
1429 mode |= (EMAC_MDIO_MODE_CLAUSE_45);
1430 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode);
1431
1432 udelay(40);
1433}
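/*
 * Illustrative math, assuming the MDIO clock is derived as
 * core_clk / (2 * (CLOCK_CNT + 1)) from a 250MHz core clock (an
 * assumption, not taken from this file): CLOCK_CNT = 49 gives
 * 250MHz / 100 = 2.5MHz, matching the comment above.
 */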
1434
1435static void bnx2x_emac_init(struct link_params *params,
1436 struct link_vars *vars)
1437{
1438 /* reset and unreset the emac core */
1439 struct bnx2x *bp = params->bp;
1440 u8 port = params->port;
1441 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1442 u32 val;
1443 u16 timeout;
1444
1445 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1446 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
1447 udelay(5);
1448 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1449 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
1450
1451 /* init emac - use read-modify-write */
1452 /* self clear reset */
1453 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1454 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
1455
1456 timeout = 200;
1457 do {
1458 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1459 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
1460 if (!timeout) {
1461 DP(NETIF_MSG_LINK, "EMAC timeout!\n");
1462 return;
1463 }
1464 timeout--;
1465 } while (val & EMAC_MODE_RESET);
1466 bnx2x_set_mdio_clk(bp, params->chip_id, port);
1467 /* Set mac address */
1468 val = ((params->mac_addr[0] << 8) |
1469 params->mac_addr[1]);
1470 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
1471
1472 val = ((params->mac_addr[2] << 24) |
1473 (params->mac_addr[3] << 16) |
1474 (params->mac_addr[4] << 8) |
1475 params->mac_addr[5]);
1476 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
1477}
1478
1479static void bnx2x_set_xumac_nig(struct link_params *params,
1480 u16 tx_pause_en,
1481 u8 enable)
1482{
1483 struct bnx2x *bp = params->bp;
1484
1485 REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN,
1486 enable);
1487 REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN,
1488 enable);
1489 REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN :
1490 NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
1491}
1492
1493static void bnx2x_umac_enable(struct link_params *params,
1494 struct link_vars *vars, u8 lb)
1495{
1496 u32 val;
1497 u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
1498 struct bnx2x *bp = params->bp;
1499 /* Reset UMAC */
1500 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1501 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
1502 usleep_range(1000, 1000);
1503
1504 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1505 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
1506
1507 DP(NETIF_MSG_LINK, "enabling UMAC\n");
1508
1509	/*
1510 * This register determines on which events the MAC will assert
1511 * error on the i/f to the NIG along w/ EOP.
1512 */
1513
1514	/*
1515 * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK +
1516 * params->port*0x14, 0xfffff.
1517 */
1518 /* This register opens the gate for the UMAC despite its name */
1519 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
1520
1521 val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN |
1522 UMAC_COMMAND_CONFIG_REG_PAD_EN |
1523 UMAC_COMMAND_CONFIG_REG_SW_RESET |
1524 UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
1525 switch (vars->line_speed) {
1526 case SPEED_10:
1527 val |= (0<<2);
1528 break;
1529 case SPEED_100:
1530 val |= (1<<2);
1531 break;
1532 case SPEED_1000:
1533 val |= (2<<2);
1534 break;
1535 case SPEED_2500:
1536 val |= (3<<2);
1537 break;
1538 default:
1539 DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n",
1540 vars->line_speed);
1541 break;
1542 }
1543 if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
1544 val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;
1545
1546 if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
1547 val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
1548
1549 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1550 udelay(50);
1551
1552 /* Set MAC address for source TX Pause/PFC frames (under SW reset) */
1553 REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
1554 ((params->mac_addr[2] << 24) |
1555 (params->mac_addr[3] << 16) |
1556 (params->mac_addr[4] << 8) |
1557 (params->mac_addr[5])));
1558 REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
1559 ((params->mac_addr[0] << 8) |
1560 (params->mac_addr[1])));
1561
1562 /* Enable RX and TX */
1563 val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN;
1564 val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |
1565 UMAC_COMMAND_CONFIG_REG_RX_ENA;
1566 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1567 udelay(50);
1568
1569 /* Remove SW Reset */
1570 val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
1571
1572 /* Check loopback mode */
1573 if (lb)
1574 val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
1575 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1576
1577 /*
1578 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
1579 * length used by the MAC receive logic to check frames.
1580 */
1581 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
1582 bnx2x_set_xumac_nig(params,
1583 ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
1584 vars->mac_type = MAC_TYPE_UMAC;
1585
1586}
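/*
 * Bits [3:2] of UMAC_COMMAND_CONFIG, set in the switch above, encode
 * the speed: 0 = 10M, 1 = 100M, 2 = 1G, 3 = 2.5G.
 */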
1587
1588static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
1589{
1590 u32 port4mode_ovwr_val;
1591 /* Check 4-port override enabled */
1592 port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
1593 if (port4mode_ovwr_val & (1<<0)) {
1594 /* Return 4-port mode override value */
1595 return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
1596 }
1597 /* Return 4-port mode from input pin */
1598 return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
1599}
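/*
 * E.g. (illustrative) PORT4MODE_EN_OVWR = 0x3 (bit 0 = override
 * enable, bit 1 = override value) forces 4-port mode regardless of
 * the MISC_REG_PORT4MODE_EN input pin.
 */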
1600
1601/* Define the XMAC mode */
1602static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed)
1603{
1604 u32 is_port4mode = bnx2x_is_4_port_mode(bp);
1605
1606	/*
1607	 * In 4-port mode, the mode needs to be set only once, so if XMAC is
1608	 * already out of reset, it means the mode has already been set,
1609	 * and it must *not* reset the XMAC again, since it controls both
1610	 * ports of the path.
1611	 */
1612
1613 if (is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) &
1614 MISC_REGISTERS_RESET_REG_2_XMAC)) {
1615 DP(NETIF_MSG_LINK, "XMAC already out of reset"
1616 " in 4-port mode\n");
1617 return;
1618 }
1619
1620 /* Hard reset */
1621 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1622 MISC_REGISTERS_RESET_REG_2_XMAC);
1623 usleep_range(1000, 1000);
1624
1625 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1626 MISC_REGISTERS_RESET_REG_2_XMAC);
1627 if (is_port4mode) {
1628 DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
1629
1630 /* Set the number of ports on the system side to up to 2 */
1631 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
1632
1633 /* Set the number of ports on the Warp Core to 10G */
1634 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
1635 } else {
1636 /* Set the number of ports on the system side to 1 */
1637 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
1638 if (max_speed == SPEED_10000) {
1639 DP(NETIF_MSG_LINK, "Init XMAC to 10G x 1"
1640 " port per path\n");
1641 /* Set the number of ports on the Warp Core to 10G */
1642 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
1643 } else {
1644 DP(NETIF_MSG_LINK, "Init XMAC to 20G x 2 ports"
1645 " per path\n");
1646 /* Set the number of ports on the Warp Core to 20G */
1647 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1);
1648 }
1649 }
1650 /* Soft reset */
1651 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1652 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
1653 usleep_range(1000, 1000);
1654
1655 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1656 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
1657
1658}
1659
1660static void bnx2x_xmac_disable(struct link_params *params)
1661{
1662 u8 port = params->port;
1663 struct bnx2x *bp = params->bp;
1664 u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
1665
1666 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
1667 MISC_REGISTERS_RESET_REG_2_XMAC) {
1668 /*
1669 * Send an indication to change the state in the NIG back to XON
1670 * Clearing this bit enables the next set of this bit to get
1671 * rising edge
1672 */
1673 pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI);
1674 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
1675 (pfc_ctrl & ~(1<<1)));
1676 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
1677 (pfc_ctrl | (1<<1)));
1678 DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
1679 REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
1680 usleep_range(1000, 1000);
1681 bnx2x_set_xumac_nig(params, 0, 0);
1682 REG_WR(bp, xmac_base + XMAC_REG_CTRL,
1683 XMAC_CTRL_REG_SOFT_RESET);
1684 }
1685}
1686
1687static int bnx2x_xmac_enable(struct link_params *params,
1688 struct link_vars *vars, u8 lb)
1689{
1690 u32 val, xmac_base;
1691 struct bnx2x *bp = params->bp;
1692 DP(NETIF_MSG_LINK, "enabling XMAC\n");
1693
1694 xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
1695
1696 bnx2x_xmac_init(bp, vars->line_speed);
1697
1698 /*
1699 * This register determines on which events the MAC will assert
1700 * error on the i/f to the NIG along w/ EOP.
1701 */
1702
1703 /*
1704 * This register tells the NIG whether to send traffic to UMAC
1705 * or XMAC
1706 */
1707 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
1708
1709 /* Set Max packet size */
1710 REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
1711
1712 /* CRC append for Tx packets */
1713 REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800);
1714
1715 /* update PFC */
1716 bnx2x_update_pfc_xmac(params, vars, 0);
1717
1718 /* Enable TX and RX */
1719 val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
1720
1721 /* Check loopback mode */
1722 if (lb)
1723 val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
1724 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
1725 bnx2x_set_xumac_nig(params,
1726 ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
1727
1728 vars->mac_type = MAC_TYPE_XMAC;
1729
1730 return 0;
1731}
1732static int bnx2x_emac_enable(struct link_params *params,
1733 struct link_vars *vars, u8 lb)
1734{
1735 struct bnx2x *bp = params->bp;
1736 u8 port = params->port;
1737 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1738 u32 val;
1739
1740 DP(NETIF_MSG_LINK, "enabling EMAC\n");
1741
1742 /* Disable BMAC */
1743 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1744 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1745
1746 /* enable emac and not bmac */
1747 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
1748
1749 /* ASIC */
1750 if (vars->phy_flags & PHY_XGXS_FLAG) {
1751 u32 ser_lane = ((params->lane_config &
1752 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
1753 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
1754
1755 DP(NETIF_MSG_LINK, "XGXS\n");
1756 /* select the master lanes (out of 0-3) */
1757 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
1758 /* select XGXS */
1759 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
1760
1761 } else { /* SerDes */
1762 DP(NETIF_MSG_LINK, "SerDes\n");
1763 /* select SerDes */
1764 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
1765 }
1766
1767 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
1768 EMAC_RX_MODE_RESET);
1769 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
1770 EMAC_TX_MODE_RESET);
1771
1772 if (CHIP_REV_IS_SLOW(bp)) {
1773 /* config GMII mode */
1774 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1775 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
1776 } else { /* ASIC */
1777 /* pause enable/disable */
1778 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
1779 EMAC_RX_MODE_FLOW_EN);
1780
1781 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
1782 (EMAC_TX_MODE_EXT_PAUSE_EN |
1783 EMAC_TX_MODE_FLOW_EN));
1784 if (!(params->feature_config_flags &
1785 FEATURE_CONFIG_PFC_ENABLED)) {
1786 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
1787 bnx2x_bits_en(bp, emac_base +
1788 EMAC_REG_EMAC_RX_MODE,
1789 EMAC_RX_MODE_FLOW_EN);
1790
1791 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
1792 bnx2x_bits_en(bp, emac_base +
1793 EMAC_REG_EMAC_TX_MODE,
1794 (EMAC_TX_MODE_EXT_PAUSE_EN |
1795 EMAC_TX_MODE_FLOW_EN));
1796 } else
1797 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
1798 EMAC_TX_MODE_FLOW_EN);
1799 }
1800
1801 /* KEEP_VLAN_TAG, promiscuous */
1802 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
1803 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
1804
1805 /*
1806 * Setting this bit causes MAC control frames (except for pause
1807 * frames) to be passed on for processing. This setting has no
1808	 * effect on the operation of the pause frames. This bit affects
1809	 * all packets regardless of RX Parser packet sorting logic.
1810 * Turn the PFC off to make sure we are in Xon state before
1811 * enabling it.
1812 */
1813 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
1814 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
1815 DP(NETIF_MSG_LINK, "PFC is enabled\n");
1816 /* Enable PFC again */
1817 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE,
1818 EMAC_REG_RX_PFC_MODE_RX_EN |
1819 EMAC_REG_RX_PFC_MODE_TX_EN |
1820 EMAC_REG_RX_PFC_MODE_PRIORITIES);
1821
1822 EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM,
1823 ((0x0101 <<
1824 EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
1825 (0x00ff <<
1826 EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
1827 val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
1828 }
1829 EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
1830
1831 /* Set Loopback */
1832 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1833 if (lb)
1834 val |= 0x810;
1835 else
1836 val &= ~0x810;
1837 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
1838
1839 /* enable emac */
1840 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
1841
1842 /* enable emac for jumbo packets */
1843 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
1844 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
1845 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
1846
1847 /* strip CRC */
1848 REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
1849
1850 /* disable the NIG in/out to the bmac */
1851 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
1852 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
1853 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
1854
1855 /* enable the NIG in/out to the emac */
1856 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
1857 val = 0;
1858 if ((params->feature_config_flags &
1859 FEATURE_CONFIG_PFC_ENABLED) ||
1860 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
1861 val = 1;
1862
1863 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
1864 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
1865
1866 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
1867
1868 vars->mac_type = MAC_TYPE_EMAC;
1869 return 0;
1870}
1871
1872static void bnx2x_update_pfc_bmac1(struct link_params *params,
1873 struct link_vars *vars)
1874{
1875 u32 wb_data[2];
1876 struct bnx2x *bp = params->bp;
1877 u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
1878 NIG_REG_INGRESS_BMAC0_MEM;
1879
1880 u32 val = 0x14;
1881 if ((!(params->feature_config_flags &
1882 FEATURE_CONFIG_PFC_ENABLED)) &&
1883 (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
1884 /* Enable BigMAC to react on received Pause packets */
1885 val |= (1<<5);
1886 wb_data[0] = val;
1887 wb_data[1] = 0;
1888 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
1889
1890 /* tx control */
1891 val = 0xc0;
1892 if (!(params->feature_config_flags &
1893 FEATURE_CONFIG_PFC_ENABLED) &&
1894 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
1895 val |= 0x800000;
1896 wb_data[0] = val;
1897 wb_data[1] = 0;
1898 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
1899}
1900
1901static void bnx2x_update_pfc_bmac2(struct link_params *params,
1902 struct link_vars *vars,
1903 u8 is_lb)
1904{
1905 /*
1906 * Set rx control: Strip CRC and enable BigMAC to relay
1907 * control packets to the system as well
1908 */
1909 u32 wb_data[2];
1910 struct bnx2x *bp = params->bp;
1911 u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
1912 NIG_REG_INGRESS_BMAC0_MEM;
1913 u32 val = 0x14;
1914
1915 if ((!(params->feature_config_flags &
1916 FEATURE_CONFIG_PFC_ENABLED)) &&
1917 (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
1918 /* Enable BigMAC to react on received Pause packets */
1919 val |= (1<<5);
1920 wb_data[0] = val;
1921 wb_data[1] = 0;
1922 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
1923 udelay(30);
1924
1925 /* Tx control */
1926 val = 0xc0;
1927 if (!(params->feature_config_flags &
1928 FEATURE_CONFIG_PFC_ENABLED) &&
1929 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
1930 val |= 0x800000;
1931 wb_data[0] = val;
1932 wb_data[1] = 0;
1933 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
1934
1935 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
1936 DP(NETIF_MSG_LINK, "PFC is enabled\n");
1937 /* Enable PFC RX & TX & STATS and set 8 COS */
1938 wb_data[0] = 0x0;
1939 wb_data[0] |= (1<<0); /* RX */
1940 wb_data[0] |= (1<<1); /* TX */
1941 wb_data[0] |= (1<<2); /* Force initial Xon */
1942 wb_data[0] |= (1<<3); /* 8 cos */
1943 wb_data[0] |= (1<<5); /* STATS */
1944 wb_data[1] = 0;
1945 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
1946 wb_data, 2);
1947 /* Clear the force Xon */
1948 wb_data[0] &= ~(1<<2);
1949 } else {
1950 DP(NETIF_MSG_LINK, "PFC is disabled\n");
1951 /* disable PFC RX & TX & STATS and set 8 COS */
1952 wb_data[0] = 0x8;
1953 wb_data[1] = 0;
1954 }
1955
1956 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
1957
1958 /*
1959	 * Set the time (base unit is 512 bit-times) between automatic
1960	 * re-sending of PP packets, and enable automatic re-send of
1961	 * Per-Priority Packets as long as pp_gen is asserted and
1962	 * pp_disable is low.
1963 */
1964 val = 0x8000;
1965 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
1966 val |= (1<<16); /* enable automatic re-send */
1967
1968 wb_data[0] = val;
1969 wb_data[1] = 0;
1970 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
1971 wb_data, 2);
1972
1973 /* mac control */
1974 val = 0x3; /* Enable RX and TX */
1975 if (is_lb) {
1976 val |= 0x4; /* Local loopback */
1977 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
1978 }
1979 /* When PFC enabled, Pass pause frames towards the NIG. */
1980 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
1981 val |= ((1<<6)|(1<<5));
1982
1983 wb_data[0] = val;
1984 wb_data[1] = 0;
1985 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
1986}
1987
1988
1989/* PFC BRB internal port configuration params */
1990struct bnx2x_pfc_brb_threshold_val {
1991 u32 pause_xoff;
1992 u32 pause_xon;
1993 u32 full_xoff;
1994 u32 full_xon;
1995};
1996
1997struct bnx2x_pfc_brb_e3b0_val {
1998 u32 full_lb_xoff_th;
1999 u32 full_lb_xon_threshold;
2000 u32 lb_guarantied;
2001 u32 mac_0_class_t_guarantied;
2002 u32 mac_0_class_t_guarantied_hyst;
2003 u32 mac_1_class_t_guarantied;
2004 u32 mac_1_class_t_guarantied_hyst;
2005};
2006
2007struct bnx2x_pfc_brb_th_val {
2008 struct bnx2x_pfc_brb_threshold_val pauseable_th;
2009 struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
2010};
2011static int bnx2x_pfc_brb_get_config_params(
2012 struct link_params *params,
2013 struct bnx2x_pfc_brb_th_val *config_val)
2014{
2015 struct bnx2x *bp = params->bp;
2016 DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
2017 if (CHIP_IS_E2(bp)) {
2018 config_val->pauseable_th.pause_xoff =
2019 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2020 config_val->pauseable_th.pause_xon =
2021 PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
2022 config_val->pauseable_th.full_xoff =
2023 PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
2024 config_val->pauseable_th.full_xon =
2025 PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
2026		/* non-pauseable */
2027 config_val->non_pauseable_th.pause_xoff =
2028 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2029 config_val->non_pauseable_th.pause_xon =
2030 PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
2031 config_val->non_pauseable_th.full_xoff =
2032 PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
2033 config_val->non_pauseable_th.full_xon =
2034 PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2035 } else if (CHIP_IS_E3A0(bp)) {
2036 config_val->pauseable_th.pause_xoff =
2037 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2038 config_val->pauseable_th.pause_xon =
2039 PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
2040 config_val->pauseable_th.full_xoff =
2041 PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
2042 config_val->pauseable_th.full_xon =
2043 PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
2044		/* non-pauseable */
2045 config_val->non_pauseable_th.pause_xoff =
2046 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2047 config_val->non_pauseable_th.pause_xon =
2048 PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
2049 config_val->non_pauseable_th.full_xoff =
2050 PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
2051 config_val->non_pauseable_th.full_xon =
2052 PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2053 } else if (CHIP_IS_E3B0(bp)) {
2054 if (params->phy[INT_PHY].flags &
2055 FLAGS_4_PORT_MODE) {
2056 config_val->pauseable_th.pause_xoff =
2057 PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2058 config_val->pauseable_th.pause_xon =
2059 PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
2060 config_val->pauseable_th.full_xoff =
2061 PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
2062 config_val->pauseable_th.full_xon =
2063 PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
2064			/* non-pauseable */
2065 config_val->non_pauseable_th.pause_xoff =
2066 PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2067 config_val->non_pauseable_th.pause_xon =
2068 PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
2069 config_val->non_pauseable_th.full_xoff =
2070 PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
2071 config_val->non_pauseable_th.full_xon =
2072 PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2073 } else {
2074 config_val->pauseable_th.pause_xoff =
2075 PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2076 config_val->pauseable_th.pause_xon =
2077 PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
2078 config_val->pauseable_th.full_xoff =
2079 PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
2080 config_val->pauseable_th.full_xon =
2081 PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
2082			/* non-pauseable */
2083 config_val->non_pauseable_th.pause_xoff =
2084 PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2085 config_val->non_pauseable_th.pause_xon =
2086 PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
2087 config_val->non_pauseable_th.full_xoff =
2088 PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
2089 config_val->non_pauseable_th.full_xon =
2090 PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2091 }
2092 } else
2093 return -EINVAL;
2094
2095 return 0;
2096}
2097
2098
2099static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
2100 struct bnx2x_pfc_brb_e3b0_val
2101 *e3b0_val,
2102 u32 cos0_pauseable,
2103 u32 cos1_pauseable)
2104{
2105 if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
2106 e3b0_val->full_lb_xoff_th =
2107 PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
2108 e3b0_val->full_lb_xon_threshold =
2109 PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
2110 e3b0_val->lb_guarantied =
2111 PFC_E3B0_4P_LB_GUART;
2112 e3b0_val->mac_0_class_t_guarantied =
2113 PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
2114 e3b0_val->mac_0_class_t_guarantied_hyst =
2115 PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
2116 e3b0_val->mac_1_class_t_guarantied =
2117 PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
2118 e3b0_val->mac_1_class_t_guarantied_hyst =
2119 PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
2120 } else {
2121 e3b0_val->full_lb_xoff_th =
2122 PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
2123 e3b0_val->full_lb_xon_threshold =
2124 PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
2125 e3b0_val->mac_0_class_t_guarantied_hyst =
2126 PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
2127 e3b0_val->mac_1_class_t_guarantied =
2128 PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
2129 e3b0_val->mac_1_class_t_guarantied_hyst =
2130 PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
2131
2132 if (cos0_pauseable != cos1_pauseable) {
2133			/* non-pauseable = Lossy + pauseable = Lossless */
2134 e3b0_val->lb_guarantied =
2135 PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
2136 e3b0_val->mac_0_class_t_guarantied =
2137 PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
2138 } else if (cos0_pauseable) {
2139			/* Lossless + Lossless */
2140 e3b0_val->lb_guarantied =
2141 PFC_E3B0_2P_PAUSE_LB_GUART;
2142 e3b0_val->mac_0_class_t_guarantied =
2143 PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
2144 } else {
2145			/* Lossy + Lossy */
2146 e3b0_val->lb_guarantied =
2147 PFC_E3B0_2P_NON_PAUSE_LB_GUART;
2148 e3b0_val->mac_0_class_t_guarantied =
2149 PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
2150 }
2151 }
2152}
2153static int bnx2x_update_pfc_brb(struct link_params *params,
2154 struct link_vars *vars,
2155 struct bnx2x_nig_brb_pfc_port_params
2156 *pfc_params)
2157{
2158 struct bnx2x *bp = params->bp;
2159 struct bnx2x_pfc_brb_th_val config_val = { {0} };
2160 struct bnx2x_pfc_brb_threshold_val *reg_th_config =
2161 &config_val.pauseable_th;
2162 struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
2163 int set_pfc = params->feature_config_flags &
2164 FEATURE_CONFIG_PFC_ENABLED;
2165 int bnx2x_status = 0;
2166 u8 port = params->port;
2167
2168 /* default - pause configuration */
2169 reg_th_config = &config_val.pauseable_th;
2170 bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
2171 if (0 != bnx2x_status)
2172 return bnx2x_status;
2173
2174	/* First COS */
2175	if (set_pfc && pfc_params &&
2176	    !pfc_params->cos0_pauseable)
2177		reg_th_config = &config_val.non_pauseable_th;
2178 /*
2179 * The number of free blocks below which the pause signal to class 0
2180 * of MAC #n is asserted. n=0,1
2181 */
2182 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
2183	       BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0,
2184 reg_th_config->pause_xoff);
2185 /*
2186 * The number of free blocks above which the pause signal to class 0
2187 * of MAC #n is de-asserted. n=0,1
2188 */
2189 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
2190	       BRB1_REG_PAUSE_0_XON_THRESHOLD_0, reg_th_config->pause_xon);
2191 /*
2192 * The number of free blocks below which the full signal to class 0
2193 * of MAC #n is asserted. n=0,1
2194 */
2195 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
2196	       BRB1_REG_FULL_0_XOFF_THRESHOLD_0, reg_th_config->full_xoff);
2197 /*
2198 * The number of free blocks above which the full signal to class 0
2199 * of MAC #n is de-asserted. n=0,1
2200 */
2201 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
2202	       BRB1_REG_FULL_0_XON_THRESHOLD_0, reg_th_config->full_xon);
2203
2204 if (set_pfc && pfc_params) {
2205 /* Second COS */
2206 if (pfc_params->cos1_pauseable)
2207 reg_th_config = &config_val.pauseable_th;
2208 else
2209 reg_th_config = &config_val.non_pauseable_th;
2210 /*
2211 * The number of free blocks below which the pause signal to
2212 * class 1 of MAC #n is asserted. n=0,1
2213		 */
2214 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
2215 BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
2216 reg_th_config->pause_xoff);
2217 /*
2218 * The number of free blocks above which the pause signal to
2219 * class 1 of MAC #n is de-asserted. n=0,1
2220 */
2221 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
2222 BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
2223 reg_th_config->pause_xon);
2224 /*
2225 * The number of free blocks below which the full signal to
2226 * class 1 of MAC #n is asserted. n=0,1
2227 */
2228 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
2229 BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
2230 reg_th_config->full_xoff);
2231 /*
2232 * The number of free blocks above which the full signal to
2233 * class 1 of MAC #n is de-asserted. n=0,1
2234 */
2235 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
2236 BRB1_REG_FULL_1_XON_THRESHOLD_0,
2237 reg_th_config->full_xon);
2238
2239
2240 if (CHIP_IS_E3B0(bp)) {
2241			/* Should be done by the init tool */
2242			/*
2243			 * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
2244			 * reset value
2245			 * 944
2246			 */
2247
2248			/*
2249			 * The hysteresis on the guaranteed buffer space for
2250			 * the LB port before signaling XON.
2251			 */
2252 REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
2253
2254 bnx2x_pfc_brb_get_e3b0_config_params(
2255 params,
2256 &e3b0_val,
2257 pfc_params->cos0_pauseable,
2258 pfc_params->cos1_pauseable);
2259			/*
2260 * The number of free blocks below which the full signal to the
2261 * LB port is asserted.
2262 */
2263 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
2264 e3b0_val.full_lb_xoff_th);
2265			/*
2266 * The number of free blocks above which the full signal to the
2267 * LB port is de-asserted.
2268 */
2269 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
2270 e3b0_val.full_lb_xon_threshold);
2271			/*
2272			 * The number of blocks guaranteed for the MAC #n port. n=0,1
2273			 */
2274
2275			/* The number of blocks guaranteed for the LB port. */
2276 REG_WR(bp, BRB1_REG_LB_GUARANTIED,
2277 e3b0_val.lb_guarantied);
2278
2279			/*
2280			 * The number of blocks guaranteed for the MAC #n port.
2281 */
2282 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
2283 2 * e3b0_val.mac_0_class_t_guarantied);
2284 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
2285 2 * e3b0_val.mac_1_class_t_guarantied);
2286			/*
2287			 * The number of blocks guaranteed for class #t in MAC0. t=0,1
2288 */
2289 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
2290 e3b0_val.mac_0_class_t_guarantied);
2291 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
2292 e3b0_val.mac_0_class_t_guarantied);
2293			/*
2294			 * The hysteresis on the guaranteed buffer space for class #t
2295			 * in MAC0. t=0,1
2296 */
2297 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
2298 e3b0_val.mac_0_class_t_guarantied_hyst);
2299 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
2300 e3b0_val.mac_0_class_t_guarantied_hyst);
2301
2302			/*
2303			 * The number of blocks guaranteed for class #t in MAC1. t=0,1
2304 */
2305 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
2306 e3b0_val.mac_1_class_t_guarantied);
2307 REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
2308 e3b0_val.mac_1_class_t_guarantied);
2309			/*
2310			 * The hysteresis on the guaranteed buffer space for class #t
2311 * in MAC1. t=0,1
2312 */
2313 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
2314 e3b0_val.mac_1_class_t_guarantied_hyst);
2315 REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
2316 e3b0_val.mac_1_class_t_guarantied_hyst);
2317
2318 }
2319
2320 }
2321
2322 return bnx2x_status;
2323}
2324
2325/******************************************************************************
2326* Description:
2327*	This function is needed because the NIG priority-mask registers
2328*	are not contiguous, so base + offset addressing is not suitable.
2329******************************************************************************/
2330int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
2331 u8 cos_entry,
2332 u32 priority_mask, u8 port)
2333{
2334 u32 nig_reg_rx_priority_mask_add = 0;
2335
2336 switch (cos_entry) {
2337 case 0:
2338 nig_reg_rx_priority_mask_add = (port) ?
2339 NIG_REG_P1_RX_COS0_PRIORITY_MASK :
2340 NIG_REG_P0_RX_COS0_PRIORITY_MASK;
2341 break;
2342 case 1:
2343 nig_reg_rx_priority_mask_add = (port) ?
2344 NIG_REG_P1_RX_COS1_PRIORITY_MASK :
2345 NIG_REG_P0_RX_COS1_PRIORITY_MASK;
2346 break;
2347 case 2:
2348 nig_reg_rx_priority_mask_add = (port) ?
2349 NIG_REG_P1_RX_COS2_PRIORITY_MASK :
2350 NIG_REG_P0_RX_COS2_PRIORITY_MASK;
2351 break;
2352 case 3:
2353 if (port)
2354 return -EINVAL;
2355 nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
2356 break;
2357 case 4:
2358 if (port)
2359 return -EINVAL;
2360 nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
2361 break;
2362 case 5:
2363 if (port)
2364 return -EINVAL;
2365 nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
2366 break;
2367 }
2368
2369 REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask);
2370
2371 return 0;
2372}
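/*
 * Usage sketch (illustrative values):
 * bnx2x_pfc_nig_rx_priority_mask(bp, 1, 0x0C, 0) maps packet
 * priorities 2 and 3 to COS1 on port 0; cos_entry values 3-5 return
 * -EINVAL on port 1.
 */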
2373static void bnx2x_update_mng(struct link_params *params, u32 link_status)
2374{
2375 struct bnx2x *bp = params->bp;
2376
2377 REG_WR(bp, params->shmem_base +
2378 offsetof(struct shmem_region,
2379 port_mb[params->port].link_status), link_status);
2380}
2381
2382static void bnx2x_update_pfc_nig(struct link_params *params,
2383 struct link_vars *vars,
2384 struct bnx2x_nig_brb_pfc_port_params *nig_params)
2385{
2386 u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
2387 u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
2388 u32 pkt_priority_to_cos = 0;
2389 struct bnx2x *bp = params->bp;
2390 u8 port = params->port;
2391
2392 int set_pfc = params->feature_config_flags &
2393 FEATURE_CONFIG_PFC_ENABLED;
2394 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
2395
2396 /*
2397 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
2398 * MAC control frames (that are not pause packets)
2399 * will be forwarded to the XCM.
2400 */
2401 xcm_mask = REG_RD(bp,
2402 port ? NIG_REG_LLH1_XCM_MASK :
2403 NIG_REG_LLH0_XCM_MASK);
2404 /*
2405	 * NIG params override the non-PFC params, since a transition
2406	 * from PFC to SAFC is possible
2407 */
2408 if (set_pfc) {
2409 pause_enable = 0;
2410 llfc_out_en = 0;
2411 llfc_enable = 0;
2412 if (CHIP_IS_E3(bp))
2413 ppp_enable = 0;
2414 else
2415 ppp_enable = 1;
2416 xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
2417 NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
2418 xcm0_out_en = 0;
2419 p0_hwpfc_enable = 1;
2420 } else {
2421 if (nig_params) {
2422 llfc_out_en = nig_params->llfc_out_en;
2423 llfc_enable = nig_params->llfc_enable;
2424 pause_enable = nig_params->pause_enable;
2425		} else /* default non-PFC mode - PAUSE */
2426 pause_enable = 1;
2427
2428 xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
2429 NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
2430 xcm0_out_en = 1;
2431 }
2432
2433 if (CHIP_IS_E3(bp))
2434 REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN :
2435 NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
2436 REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
2437 NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
2438 REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
2439 NIG_REG_LLFC_ENABLE_0, llfc_enable);
2440 REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
2441 NIG_REG_PAUSE_ENABLE_0, pause_enable);
2442
2443 REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
2444 NIG_REG_PPP_ENABLE_0, ppp_enable);
2445
2446 REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
2447 NIG_REG_LLH0_XCM_MASK, xcm_mask);
2448
2449 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
2450
2451 /* output enable for RX_XCM # IF */
2452 REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
2453
2454 /* HW PFC TX enable */
2455 REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
2456
2457 if (nig_params) {
2458 u8 i = 0;
2459 pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
2460
2461 for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
2462 bnx2x_pfc_nig_rx_priority_mask(bp, i,
2463 nig_params->rx_cos_priority_mask[i], port);
2464
2465 REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
2466 NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
2467 nig_params->llfc_high_priority_classes);
2468
2469 REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
2470 NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
2471 nig_params->llfc_low_priority_classes);
2472 }
2473 REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
2474 NIG_REG_P0_PKT_PRIORITY_TO_COS,
2475 pkt_priority_to_cos);
2476}
2477
2478int bnx2x_update_pfc(struct link_params *params,
2479 struct link_vars *vars,
2480 struct bnx2x_nig_brb_pfc_port_params *pfc_params)
2481{
2482 /*
2483	 * PFC and pause are orthogonal to one another: when PFC is
2484	 * enabled, pause is disabled, and when PFC is disabled, pause
2485	 * is set according to the pause result.
2486 */
2487 u32 val;
2488 struct bnx2x *bp = params->bp;
2489 int bnx2x_status = 0;
2490 u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
2491
2492 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
2493 vars->link_status |= LINK_STATUS_PFC_ENABLED;
2494 else
2495 vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
2496
2497 bnx2x_update_mng(params, vars->link_status);
2498
2499 /* update NIG params */
2500 bnx2x_update_pfc_nig(params, vars, pfc_params);
2501
2502 /* update BRB params */
2503 bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
2504 if (0 != bnx2x_status)
2505 return bnx2x_status;
2506
2507 if (!vars->link_up)
2508 return bnx2x_status;
2509
2510 DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
2511 if (CHIP_IS_E3(bp))
2512 bnx2x_update_pfc_xmac(params, vars, 0);
2513 else {
2514 val = REG_RD(bp, MISC_REG_RESET_REG_2);
2515 if ((val &
2516 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
2517 == 0) {
2518 DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
2519 bnx2x_emac_enable(params, vars, 0);
2520 return bnx2x_status;
2521 }
2522
2523 if (CHIP_IS_E2(bp))
2524 bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
2525 else
2526 bnx2x_update_pfc_bmac1(params, vars);
2527
2528 val = 0;
2529 if ((params->feature_config_flags &
2530 FEATURE_CONFIG_PFC_ENABLED) ||
2531 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
2532 val = 1;
2533 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
2534 }
2535 return bnx2x_status;
2536}
2537
2538
2539static int bnx2x_bmac1_enable(struct link_params *params,
2540 struct link_vars *vars,
2541 u8 is_lb)
2542{
2543 struct bnx2x *bp = params->bp;
2544 u8 port = params->port;
2545 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2546 NIG_REG_INGRESS_BMAC0_MEM;
2547 u32 wb_data[2];
2548 u32 val;
2549
2550 DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
2551
2552 /* XGXS control */
2553 wb_data[0] = 0x3c;
2554 wb_data[1] = 0;
2555 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
2556 wb_data, 2);
2557
2558 /* tx MAC SA */
2559 wb_data[0] = ((params->mac_addr[2] << 24) |
2560 (params->mac_addr[3] << 16) |
2561 (params->mac_addr[4] << 8) |
2562 params->mac_addr[5]);
2563 wb_data[1] = ((params->mac_addr[0] << 8) |
2564 params->mac_addr[1]);
2565 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
2566
2567 /* mac control */
2568 val = 0x3;
2569 if (is_lb) {
2570 val |= 0x4;
2571 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
2572 }
2573 wb_data[0] = val;
2574 wb_data[1] = 0;
2575 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
2576
2577 /* set rx mtu */
2578 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2579 wb_data[1] = 0;
2580 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
2581
2582 bnx2x_update_pfc_bmac1(params, vars);
2583
2584 /* set tx mtu */
2585 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2586 wb_data[1] = 0;
2587 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
2588
2589 /* set cnt max size */
2590 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2591 wb_data[1] = 0;
2592 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
2593
2594 /* configure safc */
2595 wb_data[0] = 0x1000200;
2596 wb_data[1] = 0;
2597 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
2598 wb_data, 2);
2599
2600 return 0;
2601}
2602
2603static int bnx2x_bmac2_enable(struct link_params *params,
2604 struct link_vars *vars,
2605 u8 is_lb)
2606{
2607 struct bnx2x *bp = params->bp;
2608 u8 port = params->port;
2609 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2610 NIG_REG_INGRESS_BMAC0_MEM;
2611 u32 wb_data[2];
2612
2613 DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
2614
2615 wb_data[0] = 0;
2616 wb_data[1] = 0;
2617 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
2618 udelay(30);
2619
2620 /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
2621 wb_data[0] = 0x3c;
2622 wb_data[1] = 0;
2623 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
2624 wb_data, 2);
2625
2626 udelay(30);
2627
2628 /* tx MAC SA */
2629 wb_data[0] = ((params->mac_addr[2] << 24) |
2630 (params->mac_addr[3] << 16) |
2631 (params->mac_addr[4] << 8) |
2632 params->mac_addr[5]);
2633 wb_data[1] = ((params->mac_addr[0] << 8) |
2634 params->mac_addr[1]);
2635 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
2636 wb_data, 2);
2637
2638 udelay(30);
2639
2640 /* Configure SAFC */
2641 wb_data[0] = 0x1000200;
2642 wb_data[1] = 0;
2643 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
2644 wb_data, 2);
2645 udelay(30);
2646
2647 /* set rx mtu */
2648 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2649 wb_data[1] = 0;
2650 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
2651 udelay(30);
2652
2653 /* set tx mtu */
2654 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2655 wb_data[1] = 0;
2656 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
2657 udelay(30);
2658 /* set cnt max size */
2659 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
2660 wb_data[1] = 0;
2661 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
2662 udelay(30);
2663 bnx2x_update_pfc_bmac2(params, vars, is_lb);
2664
2665 return 0;
2666}
2667
2668static int bnx2x_bmac_enable(struct link_params *params,
2669 struct link_vars *vars,
2670 u8 is_lb)
2671{
2672 int rc = 0;
2673 u8 port = params->port;
2674 struct bnx2x *bp = params->bp;
2675 u32 val;
2676 /* reset and unreset the BigMac */
2677 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2678 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2679 msleep(1);
2680
2681 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2682 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2683
2684 /* enable access for bmac registers */
2685 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2686
2687 /* Enable BMAC according to BMAC type*/
2688 if (CHIP_IS_E2(bp))
2689 rc = bnx2x_bmac2_enable(params, vars, is_lb);
2690 else
2691 rc = bnx2x_bmac1_enable(params, vars, is_lb);
2692 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
2693 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
2694 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
2695 val = 0;
2696 if ((params->feature_config_flags &
2697 FEATURE_CONFIG_PFC_ENABLED) ||
2698 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
2699 val = 1;
2700 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
2701 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
2702 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
2703 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
2704 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
2705 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
2706
2707 vars->mac_type = MAC_TYPE_BMAC;
2708 return rc;
2709}
2710
2711static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
2712{
2713 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2714 NIG_REG_INGRESS_BMAC0_MEM;
2715 u32 wb_data[2];
2716 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
2717
2718 /* Only if the bmac is out of reset */
2719 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
2720 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
2721 nig_bmac_enable) {
2722
2723 if (CHIP_IS_E2(bp)) {
2724 /* Clear Rx Enable bit in BMAC_CONTROL register */
2725 REG_RD_DMAE(bp, bmac_addr +
2726 BIGMAC2_REGISTER_BMAC_CONTROL,
2727 wb_data, 2);
2728 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
2729 REG_WR_DMAE(bp, bmac_addr +
2730 BIGMAC2_REGISTER_BMAC_CONTROL,
2731 wb_data, 2);
2732 } else {
2733 /* Clear Rx Enable bit in BMAC_CONTROL register */
2734 REG_RD_DMAE(bp, bmac_addr +
2735 BIGMAC_REGISTER_BMAC_CONTROL,
2736 wb_data, 2);
2737 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
2738 REG_WR_DMAE(bp, bmac_addr +
2739 BIGMAC_REGISTER_BMAC_CONTROL,
2740 wb_data, 2);
2741 }
2742 msleep(1);
2743 }
2744}
2745
2746static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
2747 u32 line_speed)
2748{
2749 struct bnx2x *bp = params->bp;
2750 u8 port = params->port;
2751 u32 init_crd, crd;
2752 u32 count = 1000;
2753
2754 /* disable port */
2755 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2756
2757 /* wait for init credit */
2758 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2759 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2760 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2761
2762 while ((init_crd != crd) && count) {
2763 msleep(5);
2764
2765 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2766 count--;
2767 }
2768 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2769 if (init_crd != crd) {
2770 DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n",
2771 init_crd, crd);
2772 return -EINVAL;
2773 }
2774
2775 if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
2776 line_speed == SPEED_10 ||
2777 line_speed == SPEED_100 ||
2778 line_speed == SPEED_1000 ||
2779 line_speed == SPEED_2500) {
2780 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
2781 /* update threshold */
2782 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2783 /* update init credit */
2784 init_crd = 778; /* (800-18-4) */
2785
2786 } else {
2787 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
2788 ETH_OVREHEAD)/16;
2789 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
2790 /* update threshold */
2791 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2792 /* update init credit */
2793 switch (line_speed) {
2794 case SPEED_10000:
2795 init_crd = thresh + 553 - 22;
2796 break;
2797 default:
2798 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
2799 line_speed);
2800 return -EINVAL;
2801 }
2802 }
2803 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2804 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2805 line_speed, init_crd);
2806
2807 /* probe the credit changes */
2808 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2809 msleep(5);
2810 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2811
2812 /* enable port */
2813 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2814 return 0;
2815}
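
/*
 * Editor's sketch (not part of the driver): the PBF credit arithmetic
 * used above, gathered in one place. The divide by 16 suggests credits
 * are counted in 16-byte units, and the "+ 553 - 22" fix-up for 10G is
 * copied verbatim from the code; both readings are assumptions.
 */
static inline u32 pbf_init_crd_10g_sketch(void)
{
	u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD) / 16;

	return thresh + 553 - 22;	/* 10G-specific margin */
}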
2816
2817/**
2818 * bnx2x_get_emac_base - retrieve emac base address
2819 *
2820 * @bp: driver handle
2821 * @mdc_mdio_access: access type
2822 * @port: port id
2823 *
2824 * This function selects the MDC/MDIO access (through emac0 or
2825 * emac1) depending on the mdc_mdio_access, the port, and whether
2826 * the ports are swapped. Each phy has a default access mode,
2827 * which may also be overridden by nvram configuration. That
2828 * setting, whether it is the default phy configuration or the
2829 * nvram override, is passed here as mdc_mdio_access and selects
2830 * the emac_base for the CL45 read/write operations.
2831 */
2832static u32 bnx2x_get_emac_base(struct bnx2x *bp,
2833 u32 mdc_mdio_access, u8 port)
2834{
2835 u32 emac_base = 0;
2836 switch (mdc_mdio_access) {
2837 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
2838 break;
2839 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
2840 if (REG_RD(bp, NIG_REG_PORT_SWAP))
2841 emac_base = GRCBASE_EMAC1;
2842 else
2843 emac_base = GRCBASE_EMAC0;
2844 break;
2845 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
2846 if (REG_RD(bp, NIG_REG_PORT_SWAP))
2847 emac_base = GRCBASE_EMAC0;
2848 else
2849 emac_base = GRCBASE_EMAC1;
2850 break;
2851 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
2852 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2853 break;
2854 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
2855 emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
2856 break;
2857 default:
2858 break;
2859 }
2860 return emac_base;
2861
2862}
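
/*
 * Illustration (hypothetical helper, not driver code): in the _BOTH
 * mode the port index picks the EMAC block directly, and _SWAPPED
 * simply inverts that choice, mirroring the last two cases of the
 * switch above.
 */
static u32 emac_base_by_port_sketch(u8 port, bool swapped)
{
	if (swapped)
		port ^= 1;
	return port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
}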
2863
2864/******************************************************************/
2865/* CL22 access functions */
2866/******************************************************************/
2867static int bnx2x_cl22_write(struct bnx2x *bp,
2868 struct bnx2x_phy *phy,
2869 u16 reg, u16 val)
2870{
2871 u32 tmp, mode;
2872 u8 i;
2873 int rc = 0;
2874 /* Switch to CL22 */
2875 mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
2876 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
2877 mode & ~EMAC_MDIO_MODE_CLAUSE_45);
2878
2879 /* address */
2880 tmp = ((phy->addr << 21) | (reg << 16) | val |
2881 EMAC_MDIO_COMM_COMMAND_WRITE_22 |
2882 EMAC_MDIO_COMM_START_BUSY);
2883 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
2884
2885 for (i = 0; i < 50; i++) {
2886 udelay(10);
2887
2888 tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
2889 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
2890 udelay(5);
2891 break;
2892 }
2893 }
2894 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
2895 DP(NETIF_MSG_LINK, "write phy register failed\n");
2896 rc = -EFAULT;
2897 }
2898 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
2899 return rc;
2900}
2901
2902static int bnx2x_cl22_read(struct bnx2x *bp,
2903 struct bnx2x_phy *phy,
2904 u16 reg, u16 *ret_val)
2905{
2906 u32 val, mode;
2907 u16 i;
2908 int rc = 0;
2909
2910 /* Switch to CL22 */
2911 mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
2912 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
2913 mode & ~EMAC_MDIO_MODE_CLAUSE_45);
2914
2915 /* address */
2916 val = ((phy->addr << 21) | (reg << 16) |
2917 EMAC_MDIO_COMM_COMMAND_READ_22 |
2918 EMAC_MDIO_COMM_START_BUSY);
2919 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
2920
2921 for (i = 0; i < 50; i++) {
2922 udelay(10);
2923
2924 val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
2925 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
2926 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
2927 udelay(5);
2928 break;
2929 }
2930 }
2931 if (val & EMAC_MDIO_COMM_START_BUSY) {
2932 DP(NETIF_MSG_LINK, "read phy register failed\n");
2933
2934 *ret_val = 0;
2935 rc = -EFAULT;
2936 }
2937 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
2938 return rc;
2939}
2940
2941/******************************************************************/
2942/* CL45 access functions */
2943/******************************************************************/
2944static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
2945 u8 devad, u16 reg, u16 *ret_val)
2946{
2947 u32 val;
2948 u16 i;
2949 int rc = 0;
2950 if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
2951 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
2952 EMAC_MDIO_STATUS_10MB);
2953 /* address */
2954 val = ((phy->addr << 21) | (devad << 16) | reg |
2955 EMAC_MDIO_COMM_COMMAND_ADDRESS |
2956 EMAC_MDIO_COMM_START_BUSY);
2957 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
2958
2959 for (i = 0; i < 50; i++) {
2960 udelay(10);
2961
2962 val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
2963 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
2964 udelay(5);
2965 break;
2966 }
2967 }
2968 if (val & EMAC_MDIO_COMM_START_BUSY) {
2969 DP(NETIF_MSG_LINK, "read phy register failed\n");
2970 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
2971 *ret_val = 0;
2972 rc = -EFAULT;
2973 } else {
2974 /* data */
2975 val = ((phy->addr << 21) | (devad << 16) |
2976 EMAC_MDIO_COMM_COMMAND_READ_45 |
2977 EMAC_MDIO_COMM_START_BUSY);
2978 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
2979
2980 for (i = 0; i < 50; i++) {
2981 udelay(10);
2982
2983 val = REG_RD(bp, phy->mdio_ctrl +
2984 EMAC_REG_EMAC_MDIO_COMM);
2985 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
2986 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
2987 break;
2988 }
2989 }
2990 if (val & EMAC_MDIO_COMM_START_BUSY) {
2991 DP(NETIF_MSG_LINK, "read phy register failed\n");
2992 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
2993 *ret_val = 0;
2994 rc = -EFAULT;
2995 }
2996 }
2997	/* Workaround for E3 A0: every other access issues a dummy read */
2998 if (phy->flags & FLAGS_MDC_MDIO_WA) {
2999 phy->flags ^= FLAGS_DUMMY_READ;
3000 if (phy->flags & FLAGS_DUMMY_READ) {
3001 u16 temp_val;
3002 bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
3003 }
3004 }
3005
3006 if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
3007 bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
3008 EMAC_MDIO_STATUS_10MB);
3009 return rc;
3010}
3011
3012static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3013 u8 devad, u16 reg, u16 val)
3014{
3015 u32 tmp;
3016 u8 i;
3017 int rc = 0;
3018 if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
3019 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
3020 EMAC_MDIO_STATUS_10MB);
3021
3022 /* address */
3023
3024 tmp = ((phy->addr << 21) | (devad << 16) | reg |
3025 EMAC_MDIO_COMM_COMMAND_ADDRESS |
3026 EMAC_MDIO_COMM_START_BUSY);
3027 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
3028
3029 for (i = 0; i < 50; i++) {
3030 udelay(10);
3031
3032 tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
3033 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
3034 udelay(5);
3035 break;
3036 }
3037 }
3038 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
3039 DP(NETIF_MSG_LINK, "write phy register failed\n");
3040 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
3041 rc = -EFAULT;
3042
3043 } else {
3044 /* data */
3045 tmp = ((phy->addr << 21) | (devad << 16) | val |
3046 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
3047 EMAC_MDIO_COMM_START_BUSY);
3048 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
3049
3050 for (i = 0; i < 50; i++) {
3051 udelay(10);
3052
3053 tmp = REG_RD(bp, phy->mdio_ctrl +
3054 EMAC_REG_EMAC_MDIO_COMM);
3055 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
3056 udelay(5);
3057 break;
3058 }
3059 }
3060 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
3061 DP(NETIF_MSG_LINK, "write phy register failed\n");
3062 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
3063 rc = -EFAULT;
3064 }
3065 }
3066	/* Workaround for E3 A0: every other access issues a dummy read */
3067 if (phy->flags & FLAGS_MDC_MDIO_WA) {
3068 phy->flags ^= FLAGS_DUMMY_READ;
3069 if (phy->flags & FLAGS_DUMMY_READ) {
3070 u16 temp_val;
3071 bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
3072 }
3073 }
3074 if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
3075 bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
3076 EMAC_MDIO_STATUS_10MB);
3077 return rc;
3078}
3079
3080
3081/******************************************************************/
3082/* BSC access functions from E3 */
3083/******************************************************************/
3084static void bnx2x_bsc_module_sel(struct link_params *params)
3085{
3086 int idx;
3087 u32 board_cfg, sfp_ctrl;
3088 u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH];
3089 struct bnx2x *bp = params->bp;
3090 u8 port = params->port;
3091 /* Read I2C output PINs */
3092 board_cfg = REG_RD(bp, params->shmem_base +
3093 offsetof(struct shmem_region,
3094 dev_info.shared_hw_config.board));
3095 i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK;
3096 i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >>
3097 SHARED_HW_CFG_E3_I2C_MUX1_SHIFT;
3098
3099 /* Read I2C output value */
3100 sfp_ctrl = REG_RD(bp, params->shmem_base +
3101 offsetof(struct shmem_region,
3102 dev_info.port_hw_config[port].e3_cmn_pin_cfg));
3103 i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
3104 i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
3105 DP(NETIF_MSG_LINK, "Setting BSC switch\n");
3106 for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
3107 bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]);
3108}
3109
3110static int bnx2x_bsc_read(struct link_params *params,
3111 struct bnx2x_phy *phy,
3112 u8 sl_devid,
3113 u16 sl_addr,
3114 u8 lc_addr,
3115 u8 xfer_cnt,
3116 u32 *data_array)
3117{
3118 u32 val, i;
3119 int rc = 0;
3120 struct bnx2x *bp = params->bp;
3121
3122 if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
3123 DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
3124 return -EINVAL;
3125 }
3126
3127 if (xfer_cnt > 16) {
3128 DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
3129 xfer_cnt);
3130 return -EINVAL;
3131 }
3132 bnx2x_bsc_module_sel(params);
3133
3134 xfer_cnt = 16 - lc_addr;
3135
3136 /* enable the engine */
3137 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3138 val |= MCPR_IMC_COMMAND_ENABLE;
3139 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
3140
3141 /* program slave device ID */
3142 val = (sl_devid << 16) | sl_addr;
3143 REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
3144
3145	/* start a transfer of 0 bytes to update the address pointer */
3146 val = (MCPR_IMC_COMMAND_ENABLE) |
3147 (MCPR_IMC_COMMAND_WRITE_OP <<
3148 MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
3149 (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
3150 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
3151
3152 /* poll for completion */
3153 i = 0;
3154 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3155 while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
3156 udelay(10);
3157 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3158 if (i++ > 1000) {
3159 DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n",
3160 i);
3161 rc = -EFAULT;
3162 break;
3163 }
3164 }
3165 if (rc == -EFAULT)
3166 return rc;
3167
3168 /* start xfer with read op */
3169 val = (MCPR_IMC_COMMAND_ENABLE) |
3170 (MCPR_IMC_COMMAND_READ_OP <<
3171 MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
3172 (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) |
3173 (xfer_cnt);
3174 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
3175
3176 /* poll for completion */
3177 i = 0;
3178 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3179 while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
3180 udelay(10);
3181 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3182 if (i++ > 1000) {
3183 DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i);
3184 rc = -EFAULT;
3185 break;
3186 }
3187 }
3188 if (rc == -EFAULT)
3189 return rc;
3190
3191 for (i = (lc_addr >> 2); i < 4; i++) {
3192 data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4));
3193#ifdef __BIG_ENDIAN
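		/* Open-coded byte swap, equivalent to swab32(), fixing
		 * up the word's byte order on big-endian hosts */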
3194 data_array[i] = ((data_array[i] & 0x000000ff) << 24) |
3195 ((data_array[i] & 0x0000ff00) << 8) |
3196 ((data_array[i] & 0x00ff0000) >> 8) |
3197 ((data_array[i] & 0xff000000) >> 24);
3198#endif
3199 }
3200 return rc;
3201}
3202
3203static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3204 u8 devad, u16 reg, u16 or_val)
3205{
3206 u16 val;
3207 bnx2x_cl45_read(bp, phy, devad, reg, &val);
3208 bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
3209}
3210
3211int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
3212 u8 devad, u16 reg, u16 *ret_val)
3213{
3214 u8 phy_index;
3215 /*
3216 * Probe for the phy according to the given phy_addr, and execute
3217 * the read request on it
3218 */
3219 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
3220 if (params->phy[phy_index].addr == phy_addr) {
3221 return bnx2x_cl45_read(params->bp,
3222 &params->phy[phy_index], devad,
3223 reg, ret_val);
3224 }
3225 }
3226 return -EINVAL;
3227}
3228
3229int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
3230 u8 devad, u16 reg, u16 val)
3231{
3232 u8 phy_index;
3233 /*
3234 * Probe for the phy according to the given phy_addr, and execute
3235 * the write request on it
3236 */
3237 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
3238 if (params->phy[phy_index].addr == phy_addr) {
3239 return bnx2x_cl45_write(params->bp,
3240 &params->phy[phy_index], devad,
3241 reg, val);
3242 }
3243 }
3244 return -EINVAL;
3245}
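
/*
 * Usage sketch (hypothetical caller, not part of the driver): the
 * exported pair above routes a CL45 access to whichever configured phy
 * matches phy_addr, so a read-modify-write looks like this:
 */
static int bnx2x_phy_rmw_sketch(struct link_params *params, u8 phy_addr,
				u8 devad, u16 reg, u16 set_bits)
{
	u16 val;
	int rc = bnx2x_phy_read(params, phy_addr, devad, reg, &val);

	if (rc)
		return rc;
	return bnx2x_phy_write(params, phy_addr, devad, reg,
			       val | set_bits);
}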
3246static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
3247 struct link_params *params)
3248{
3249 u8 lane = 0;
3250 struct bnx2x *bp = params->bp;
3251 u32 path_swap, path_swap_ovr;
3252 u8 path, port;
3253
3254 path = BP_PATH(bp);
3255 port = params->port;
3256
3257 if (bnx2x_is_4_port_mode(bp)) {
3258 u32 port_swap, port_swap_ovr;
3259
3260		/* Figure out the path swap value */
3261 path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
3262 if (path_swap_ovr & 0x1)
3263 path_swap = (path_swap_ovr & 0x2);
3264 else
3265 path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP);
3266
3267 if (path_swap)
3268 path = path ^ 1;
3269
3270		/* Figure out the port swap value */
3271 port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
3272 if (port_swap_ovr & 0x1)
3273 port_swap = (port_swap_ovr & 0x2);
3274 else
3275 port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP);
3276
3277 if (port_swap)
3278 port = port ^ 1;
3279
3280 lane = (port<<1) + path;
3281 } else { /* two port mode - no port swap */
3282
3283		/* Figure out the path swap value */
3284 path_swap_ovr =
3285 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
3286 if (path_swap_ovr & 0x1) {
3287 path_swap = (path_swap_ovr & 0x2);
3288 } else {
3289 path_swap =
3290 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP);
3291 }
3292 if (path_swap)
3293 path = path ^ 1;
3294
3295		lane = path << 1;
3296 }
3297 return lane;
3298}
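
/*
 * Worked example (editor's sketch) of the lane mapping above, with the
 * path/port swaps already applied: in 4-port mode lane = (port << 1) +
 * path, i.e. {path 0, port 0} -> 0 up to {path 1, port 1} -> 3; in
 * 2-port mode lane = path << 1, i.e. 0 or 2.
 */
static u8 warpcore_lane_sketch(u8 path, u8 port, bool four_port_mode)
{
	return four_port_mode ? (port << 1) + path : path << 1;
}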
3299
3300static void bnx2x_set_aer_mmd(struct link_params *params,
3301 struct bnx2x_phy *phy)
3302{
3303 u32 ser_lane;
3304 u16 offset, aer_val;
3305 struct bnx2x *bp = params->bp;
3306 ser_lane = ((params->lane_config &
3307 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
3308 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
3309
3310 offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
3311 (phy->addr + ser_lane) : 0;
3312
3313 if (USES_WARPCORE(bp)) {
3314 aer_val = bnx2x_get_warpcore_lane(phy, params);
3315 /*
3316 * In Dual-lane mode, two lanes are joined together,
3317 * so in order to configure them, the AER broadcast method is
3318 * used here.
3319 * 0x200 is the broadcast address for lanes 0,1
3320 * 0x201 is the broadcast address for lanes 2,3
3321 */
3322 if (phy->flags & FLAGS_WC_DUAL_MODE)
3323 aer_val = (aer_val >> 1) | 0x200;
3324 } else if (CHIP_IS_E2(bp))
3325 aer_val = 0x3800 + offset - 1;
3326 else
3327 aer_val = 0x3800 + offset;
3328 DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
3329 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
3330 MDIO_AER_BLOCK_AER_REG, aer_val);
3331
3332}
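
/*
 * Editor's sketch of the dual-lane AER arithmetic above: lanes 0 and 1
 * fold onto broadcast address 0x200, and lanes 2 and 3 onto 0x201.
 */
static u16 wc_dual_lane_aer_sketch(u8 lane)
{
	return (lane >> 1) | 0x200;	/* 0,1 -> 0x200; 2,3 -> 0x201 */
}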
3333
3334/******************************************************************/
3335/* Internal phy section */
3336/******************************************************************/
3337
3338static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
3339{
3340 u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
3341
3342 /* Set Clause 22 */
3343 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
3344 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
3345 udelay(500);
3346 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
3347 udelay(500);
3348 /* Set Clause 45 */
3349 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
3350}
3351
3352static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
3353{
3354 u32 val;
3355
3356 DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");
3357
3358 val = SERDES_RESET_BITS << (port*16);
3359
3360 /* reset and unreset the SerDes/XGXS */
3361 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3362 udelay(500);
3363 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3364
3365 bnx2x_set_serdes_access(bp, port);
3366
3367 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
3368 DEFAULT_PHY_DEV_ADDR);
3369}
3370
3371static void bnx2x_xgxs_deassert(struct link_params *params)
3372{
3373 struct bnx2x *bp = params->bp;
3374 u8 port;
3375 u32 val;
3376 DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
3377 port = params->port;
3378
3379 val = XGXS_RESET_BITS << (port*16);
3380
3381 /* reset and unreset the SerDes/XGXS */
3382 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3383 udelay(500);
3384 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3385
3386 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
3387 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
3388 params->phy[INT_PHY].def_md_devad);
3389}
3390
3391static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
3392 struct link_params *params, u16 *ieee_fc)
3393{
3394 struct bnx2x *bp = params->bp;
3395 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3396	/*
3397	 * Resolve pause mode and advertisement. Please refer to Table
3398	 * 28B-3 of the 802.3ab-1999 spec.
3399	 */
3400
3401 switch (phy->req_flow_ctrl) {
3402 case BNX2X_FLOW_CTRL_AUTO:
3403 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
3404 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3405 else
3406 *ieee_fc |=
3407 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3408 break;
3409
3410 case BNX2X_FLOW_CTRL_TX:
3411 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3412 break;
3413
3414 case BNX2X_FLOW_CTRL_RX:
3415 case BNX2X_FLOW_CTRL_BOTH:
3416 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3417 break;
3418
3419 case BNX2X_FLOW_CTRL_NONE:
3420 default:
3421 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3422 break;
3423 }
3424 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
3425}
3426
3427static void set_phy_vars(struct link_params *params,
3428 struct link_vars *vars)
3429{
3430 struct bnx2x *bp = params->bp;
3431 u8 actual_phy_idx, phy_index, link_cfg_idx;
3432 u8 phy_config_swapped = params->multi_phy_config &
3433 PORT_HW_CFG_PHY_SWAPPED_ENABLED;
3434 for (phy_index = INT_PHY; phy_index < params->num_phys;
3435 phy_index++) {
3436 link_cfg_idx = LINK_CONFIG_IDX(phy_index);
3437 actual_phy_idx = phy_index;
3438 if (phy_config_swapped) {
3439 if (phy_index == EXT_PHY1)
3440 actual_phy_idx = EXT_PHY2;
3441 else if (phy_index == EXT_PHY2)
3442 actual_phy_idx = EXT_PHY1;
3443 }
3444 params->phy[actual_phy_idx].req_flow_ctrl =
3445 params->req_flow_ctrl[link_cfg_idx];
3446
3447 params->phy[actual_phy_idx].req_line_speed =
3448 params->req_line_speed[link_cfg_idx];
3449
3450 params->phy[actual_phy_idx].speed_cap_mask =
3451 params->speed_cap_mask[link_cfg_idx];
3452
3453 params->phy[actual_phy_idx].req_duplex =
3454 params->req_duplex[link_cfg_idx];
3455
3456 if (params->req_line_speed[link_cfg_idx] ==
3457 SPEED_AUTO_NEG)
3458 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
3459
3460 DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
3461 " speed_cap_mask %x\n",
3462 params->phy[actual_phy_idx].req_flow_ctrl,
3463 params->phy[actual_phy_idx].req_line_speed,
3464 params->phy[actual_phy_idx].speed_cap_mask);
3465 }
3466}
3467
3468static void bnx2x_ext_phy_set_pause(struct link_params *params,
3469 struct bnx2x_phy *phy,
3470 struct link_vars *vars)
3471{
3472 u16 val;
3473 struct bnx2x *bp = params->bp;
3474	/* Read-modify-write the pause advertisement */
3475 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
3476
3477 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
3478
3479 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3480 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
3481 if ((vars->ieee_fc &
3482 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
3483 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
3484 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
3485 }
3486 if ((vars->ieee_fc &
3487 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
3488 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
3489 val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
3490 }
3491	DP(NETIF_MSG_LINK, "Ext phy AN advertise 0x%x\n", val);
3492 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
3493}
3494
3495static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
3496{ /* LD LP */
3497 switch (pause_result) { /* ASYM P ASYM P */
3498 case 0xb: /* 1 0 1 1 */
3499 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
3500 break;
3501
3502 case 0xe: /* 1 1 1 0 */
3503 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
3504 break;
3505
3506 case 0x5: /* 0 1 0 1 */
3507 case 0x7: /* 0 1 1 1 */
3508 case 0xd: /* 1 1 0 1 */
3509 case 0xf: /* 1 1 1 1 */
3510 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
3511 break;
3512
3513 default:
3514 break;
3515 }
3516 if (pause_result & (1<<0))
3517 vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
3518 if (pause_result & (1<<1))
3519 vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
3520}
3521
3522static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3523 struct link_params *params,
3524 struct link_vars *vars)
3525{
3526 struct bnx2x *bp = params->bp;
3527 u16 ld_pause; /* local */
3528 u16 lp_pause; /* link partner */
3529 u16 pause_result;
3530 u8 ret = 0;
3531 /* read twice */
3532
3533 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
3534
3535 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
3536 vars->flow_ctrl = phy->req_flow_ctrl;
3537 else if (phy->req_line_speed != SPEED_AUTO_NEG)
3538 vars->flow_ctrl = params->req_fc_auto_adv;
3539 else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
3540 ret = 1;
3541 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
3542 bnx2x_cl22_read(bp, phy,
3543 0x4, &ld_pause);
3544 bnx2x_cl22_read(bp, phy,
3545 0x5, &lp_pause);
3546 } else {
3547 bnx2x_cl45_read(bp, phy,
3548 MDIO_AN_DEVAD,
3549 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
3550 bnx2x_cl45_read(bp, phy,
3551 MDIO_AN_DEVAD,
3552 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
3553 }
3554 pause_result = (ld_pause &
3555 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
3556 pause_result |= (lp_pause &
3557 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
3558 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
3559 pause_result);
3560 bnx2x_pause_resolve(vars, pause_result);
3561 }
3562 return ret;
3563}
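
/*
 * Editor's sketch: how the pause_result nibble consumed by
 * bnx2x_pause_resolve() is assembled. Bits [3:2] carry the local
 * ASYM/PAUSE advertisement and bits [1:0] the link partner's, so e.g.
 * 0xb (LD asym; LP asym+pause) resolves to TX-only pause per Table
 * 28B-3.
 */
static u8 pause_result_sketch(u16 ld_adv, u16 lp_adv)
{
	return ((ld_adv & MDIO_AN_REG_ADV_PAUSE_MASK) >> 8) |
	       ((lp_adv & MDIO_AN_REG_ADV_PAUSE_MASK) >> 10);
}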
3564/******************************************************************/
3565/* Warpcore section */
3566/******************************************************************/
3567/* The init_internal_warpcore should mirror the xgxs,
3568 * i.e. reset the lane (if needed), set aer for the
3569 * init configuration, and set/clear SGMII flag. Internal
3570 * phy init is done purely in phy_init stage.
3571 */
3572static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3573 struct link_params *params,
3574 struct link_vars *vars) {
3575 u16 val16 = 0, lane, bam37 = 0;
3576 struct bnx2x *bp = params->bp;
3577 DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
3578 /* Check adding advertisement for 1G KX */
3579 if (((vars->line_speed == SPEED_AUTO_NEG) &&
3580 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
3581 (vars->line_speed == SPEED_1000)) {
3582 u16 sd_digital;
3583 val16 |= (1<<5);
3584
3585 /* Enable CL37 1G Parallel Detect */
3586 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3587 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
3588 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3589 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
3590 (sd_digital | 0x1));
3591
3592		DP(NETIF_MSG_LINK, "Advertise 1G\n");
3593 }
3594 if (((vars->line_speed == SPEED_AUTO_NEG) &&
3595 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
3596 (vars->line_speed == SPEED_10000)) {
3597 /* Check adding advertisement for 10G KR */
3598 val16 |= (1<<7);
3599 /* Enable 10G Parallel Detect */
3600 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3601 MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
3602
3603		DP(NETIF_MSG_LINK, "Advertise 10G\n");
3604 }
3605
3606 /* Set Transmit PMD settings */
3607 lane = bnx2x_get_warpcore_lane(phy, params);
3608 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3609 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
3610 ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
3611 (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
3612 (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
3613 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3614 MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
3615 0x03f0);
3616 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3617 MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL,
3618 0x03f0);
3619 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3620 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
3621 0x383f);
3622
3623 /* Advertised speeds */
3624 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3625 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
3626
3627 /* Advertised and set FEC (Forward Error Correction) */
3628 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3629 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
3630 (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
3631 MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
3632
3633 /* Enable CL37 BAM */
3634 if (REG_RD(bp, params->shmem_base +
3635 offsetof(struct shmem_region, dev_info.
3636 port_hw_config[params->port].default_cfg)) &
3637 PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
3638 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3639 MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37);
3640 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3641 MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1);
3642 DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
3643 }
3644
3645 /* Advertise pause */
3646 bnx2x_ext_phy_set_pause(params, phy, vars);
3647
3648 /* Enable Autoneg */
3649 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3650 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000);
3651
3652 /* Over 1G - AN local device user page 1 */
3653 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3654 MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
3655
3656 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3657 MDIO_WC_REG_DIGITAL5_MISC7, &val16);
3658
3659 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3660 MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
3661}
3662
3663static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
3664 struct link_params *params,
3665 struct link_vars *vars)
3666{
3667 struct bnx2x *bp = params->bp;
3668 u16 val;
3669
3670 /* Disable Autoneg */
3671 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3672 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
3673
3674 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3675 MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
3676
3677 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3678 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00);
3679
3680 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3681 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0);
3682
3683 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3684 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
3685
3686 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3687 MDIO_WC_REG_DIGITAL3_UP1, 0x1);
3688
3689 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3690 MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
3691
3692 /* Disable CL36 PCS Tx */
3693 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3694 MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
3695
3696 /* Double Wide Single Data Rate @ pll rate */
3697 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3698 MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
3699
3700 /* Leave cl72 training enable, needed for KR */
3701 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
3702 MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
3703 0x2);
3704
3705 /* Leave CL72 enabled */
3706 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3707 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
3708 &val);
3709 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3710 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
3711 val | 0x3800);
3712
3713 /* Set speed via PMA/PMD register */
3714 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
3715 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
3716
3717 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
3718 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
3719
3720	/* Enable encoded forced speed */
3721 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3722 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
3723
3724	/* Enable TX scrambling of the payload only (64/66 scrambler) */
3725 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3726 MDIO_WC_REG_TX66_CONTROL, 0x9);
3727
3728	/* Enable RX scrambling of the payload only (64/66 scrambler) */
3729 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3730 MDIO_WC_REG_RX66_CONTROL, 0xF9);
3731
3732 /* set and clear loopback to cause a reset to 64/66 decoder */
3733 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3734 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
3735 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3736 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
3737
3738}
3739
3740static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
3741 struct link_params *params,
3742 u8 is_xfi)
3743{
3744 struct bnx2x *bp = params->bp;
3745 u16 misc1_val, tap_val, tx_driver_val, lane, val;
3746 /* Hold rxSeqStart */
3747 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3748 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
3749 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3750 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
3751
3752 /* Hold tx_fifo_reset */
3753 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3754 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
3755 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3756 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
3757
3758 /* Disable CL73 AN */
3759 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
3760
3761 /* Disable 100FX Enable and Auto-Detect */
3762 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3763 MDIO_WC_REG_FX100_CTRL1, &val);
3764 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3765 MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
3766
3767 /* Disable 100FX Idle detect */
3768 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3769 MDIO_WC_REG_FX100_CTRL3, &val);
3770 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3771 MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
3772
3773 /* Set Block address to Remote PHY & Clear forced_speed[5] */
3774 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3775 MDIO_WC_REG_DIGITAL4_MISC3, &val);
3776 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3777 MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F));
3778
3779 /* Turn off auto-detect & fiber mode */
3780 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3781 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
3782 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3783 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
3784 (val & 0xFFEE));
3785
3786 /* Set filter_force_link, disable_false_link and parallel_detect */
3787 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3788 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val);
3789 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3790 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
3791 ((val | 0x0006) & 0xFFFE));
3792
3793 /* Set XFI / SFI */
3794 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3795 MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val);
3796
3797 misc1_val &= ~(0x1f);
3798
3799 if (is_xfi) {
3800 misc1_val |= 0x5;
3801 tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
3802 (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
3803 (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
3804 tx_driver_val =
3805 ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
3806 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
3807 (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
3808
3809 } else {
3810 misc1_val |= 0x9;
3811 tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
3812 (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
3813 (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
3814 tx_driver_val =
3815 ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
3816 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
3817 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
3818 }
3819 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3820 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
3821
3822 /* Set Transmit PMD settings */
3823 lane = bnx2x_get_warpcore_lane(phy, params);
3824 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3825 MDIO_WC_REG_TX_FIR_TAP,
3826 tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE);
3827 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3828 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
3829 tx_driver_val);
3830
3831 /* Enable fiber mode, enable and invert sig_det */
3832 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3833 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
3834 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3835 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
3836
3837 /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
3838 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3839 MDIO_WC_REG_DIGITAL4_MISC3, &val);
3840 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3841 MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080);
3842
3843 /* 10G XFI Full Duplex */
3844 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3845 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
3846
3847 /* Release tx_fifo_reset */
3848 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3849 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
3850 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3851 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE);
3852
3853 /* Release rxSeqStart */
3854 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3855 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
3856 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3857 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF));
3858}
3859
3860static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp,
3861 struct bnx2x_phy *phy)
3862{
3863	DP(NETIF_MSG_LINK, "KR2 is not supported yet\n");
3864}
3865
3866static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
3867 struct bnx2x_phy *phy,
3868 u16 lane)
3869{
3870 /* Rx0 anaRxControl1G */
3871 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3872 MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90);
3873
3874 /* Rx2 anaRxControl1G */
3875 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3876 MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90);
3877
3878 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3879 MDIO_WC_REG_RX66_SCW0, 0xE070);
3880
3881 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3882 MDIO_WC_REG_RX66_SCW1, 0xC0D0);
3883
3884 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3885 MDIO_WC_REG_RX66_SCW2, 0xA0B0);
3886
3887 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3888 MDIO_WC_REG_RX66_SCW3, 0x8090);
3889
3890 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3891 MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0);
3892
3893 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3894 MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0);
3895
3896 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3897 MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0);
3898
3899 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3900 MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0);
3901
3902 /* Serdes Digital Misc1 */
3903 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3904 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008);
3905
3906 /* Serdes Digital4 Misc3 */
3907 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3908 MDIO_WC_REG_DIGITAL4_MISC3, 0x8088);
3909
3910 /* Set Transmit PMD settings */
3911 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3912 MDIO_WC_REG_TX_FIR_TAP,
3913 ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
3914 (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
3915 (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
3916 MDIO_WC_REG_TX_FIR_TAP_ENABLE));
3917 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3918 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
3919 ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
3920 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
3921 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
3922}
3923
3924static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
3925 struct link_params *params,
3926 u8 fiber_mode)
3927{
3928 struct bnx2x *bp = params->bp;
3929 u16 val16, digctrl_kx1, digctrl_kx2;
3930 u8 lane;
3931
3932 lane = bnx2x_get_warpcore_lane(phy, params);
3933
3934 /* Clear XFI clock comp in non-10G single lane mode. */
3935 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3936 MDIO_WC_REG_RX66_CONTROL, &val16);
3937 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3938 MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
3939
3940 if (phy->req_line_speed == SPEED_AUTO_NEG) {
3941 /* SGMII Autoneg */
3942 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3943 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
3944 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3945 MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
3946 val16 | 0x1000);
3947 DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
3948 } else {
3949 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3950 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
3951 val16 &= 0xcfbf;
3952 switch (phy->req_line_speed) {
3953 case SPEED_10:
3954 break;
3955 case SPEED_100:
3956 val16 |= 0x2000;
3957 break;
3958 case SPEED_1000:
3959 val16 |= 0x0040;
3960 break;
3961 default:
3962			DP(NETIF_MSG_LINK, "Speed not supported: 0x%x\n",
3963			   phy->req_line_speed);
3964 return;
3965 }
3966
3967 if (phy->req_duplex == DUPLEX_FULL)
3968 val16 |= 0x0100;
3969
3970 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3971 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
3972
3973 DP(NETIF_MSG_LINK, "set SGMII force speed %d\n",
3974 phy->req_line_speed);
3975 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3976 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
3977 DP(NETIF_MSG_LINK, " (readback) %x\n", val16);
3978 }
3979
3980 /* SGMII Slave mode and disable signal detect */
3981 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3982 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1);
3983 if (fiber_mode)
3984 digctrl_kx1 = 1;
3985 else
3986 digctrl_kx1 &= 0xff4a;
3987
3988 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3989 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
3990 digctrl_kx1);
3991
3992 /* Turn off parallel detect */
3993 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3994 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2);
3995 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3996 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
3997 (digctrl_kx2 & ~(1<<2)));
3998
3999 /* Re-enable parallel detect */
4000 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4001 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
4002 (digctrl_kx2 | (1<<2)));
4003
4004 /* Enable autodet */
4005 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4006 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
4007 (digctrl_kx1 | 0x10));
4008}
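
/*
 * Editor's note on the MIICTRL encoding used above (inferred from the
 * masks and values in the code, matching the standard MII control
 * register layout): bit 13 selects 100M, bit 6 selects 1000M, bit 8
 * selects full duplex, and the 0xcfbf mask clears the speed and
 * autoneg-enable bits first.
 */
static u16 mii_forced_speed_sketch(int speed, bool full_duplex)
{
	u16 v = full_duplex ? 0x0100 : 0;

	if (speed == SPEED_100)
		v |= 0x2000;
	else if (speed == SPEED_1000)
		v |= 0x0040;
	return v;		/* SPEED_10 leaves both speed bits clear */
}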
4009
4010static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
4011 struct bnx2x_phy *phy,
4012 u8 reset)
4013{
4014 u16 val;
4015	/* Assert or de-assert the per-lane reset, per the reset flag */
4016 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4017 MDIO_WC_REG_DIGITAL5_MISC6, &val);
4018 if (reset)
4019 val |= 0xC000;
4020 else
4021 val &= 0x3FFF;
4022 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4023 MDIO_WC_REG_DIGITAL5_MISC6, val);
4024 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4025 MDIO_WC_REG_DIGITAL5_MISC6, &val);
4026}
4027
4028
4029/* Clear SFI/XFI link settings registers */
4030static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
4031 struct link_params *params,
4032 u16 lane)
4033{
4034 struct bnx2x *bp = params->bp;
4035 u16 val16;
4036
4037 /* Set XFI clock comp as default. */
4038 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4039 MDIO_WC_REG_RX66_CONTROL, &val16);
4040 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4041 MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13));
4042
4043 bnx2x_warpcore_reset_lane(bp, phy, 1);
4044 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
4045 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4046 MDIO_WC_REG_FX100_CTRL1, 0x014a);
4047 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4048 MDIO_WC_REG_FX100_CTRL3, 0x0800);
4049 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4050 MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
4051 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4052 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
4053 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4054 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
4055 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4056 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
4057 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4058 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
4059 lane = bnx2x_get_warpcore_lane(phy, params);
4060 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4061 MDIO_WC_REG_TX_FIR_TAP, 0x0000);
4062 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4063 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
4064 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4065 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
4066 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4067 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
4068 bnx2x_warpcore_reset_lane(bp, phy, 0);
4069}
4070
4071static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
4072 u32 chip_id,
4073 u32 shmem_base, u8 port,
4074 u8 *gpio_num, u8 *gpio_port)
4075{
4076 u32 cfg_pin;
4077 *gpio_num = 0;
4078 *gpio_port = 0;
4079 if (CHIP_IS_E3(bp)) {
4080 cfg_pin = (REG_RD(bp, shmem_base +
4081 offsetof(struct shmem_region,
4082 dev_info.port_hw_config[port].e3_sfp_ctrl)) &
4083 PORT_HW_CFG_E3_MOD_ABS_MASK) >>
4084 PORT_HW_CFG_E3_MOD_ABS_SHIFT;
4085
4086		/*
4087		 * Should not happen. This function is called upon an
4088		 * interrupt triggered by a GPIO (since the EPIO can only
4089		 * generate interrupts to the MCP).
4090		 * So if this function was called and none of the GPIOs is
4091		 * set, something has gone seriously wrong.
4092		 */
4093 if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
4094 (cfg_pin > PIN_CFG_GPIO3_P1)) {
4095 DP(NETIF_MSG_LINK, "ERROR: Invalid cfg pin %x for "
4096 "module detect indication\n",
4097 cfg_pin);
4098 return -EINVAL;
4099 }
4100
4101 *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
4102 *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
4103 } else {
4104 *gpio_num = MISC_REGISTERS_GPIO_3;
4105 *gpio_port = port;
4106 }
4107 DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port);
4108 return 0;
4109}
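
/*
 * Editor's sketch of the E3 pin decode above: PIN_CFG_GPIO0_P0 through
 * PIN_CFG_GPIO3_P1 enumerate four GPIOs per port, so the low two bits
 * of the offset select the GPIO and the next bit selects the port.
 */
static void mod_abs_pin_decode_sketch(u8 cfg_pin, u8 *gpio_num,
				      u8 *gpio_port)
{
	*gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
	*gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
}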
4110
4111static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
4112 struct link_params *params)
4113{
4114 struct bnx2x *bp = params->bp;
4115 u8 gpio_num, gpio_port;
4116 u32 gpio_val;
4117 if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id,
4118 params->shmem_base, params->port,
4119 &gpio_num, &gpio_port) != 0)
4120 return 0;
4121 gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
4122
4123 /* Call the handling function in case module is detected */
4124 if (gpio_val == 0)
4125 return 1;
4126 else
4127 return 0;
4128}
4129
4130static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4131 struct link_params *params,
4132 struct link_vars *vars)
4133{
4134 struct bnx2x *bp = params->bp;
4135 u32 serdes_net_if;
4136 u8 fiber_mode;
4137 u16 lane = bnx2x_get_warpcore_lane(phy, params);
4138 serdes_net_if = (REG_RD(bp, params->shmem_base +
4139 offsetof(struct shmem_region, dev_info.
4140 port_hw_config[params->port].default_cfg)) &
4141 PORT_HW_CFG_NET_SERDES_IF_MASK);
4142 DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, "
4143 "serdes_net_if = 0x%x\n",
4144 vars->line_speed, serdes_net_if);
4145 bnx2x_set_aer_mmd(params, phy);
4146
4147 vars->phy_flags |= PHY_XGXS_FLAG;
4148 if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
4149 (phy->req_line_speed &&
4150 ((phy->req_line_speed == SPEED_100) ||
4151 (phy->req_line_speed == SPEED_10)))) {
4152 vars->phy_flags |= PHY_SGMII_FLAG;
4153 DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
4154 bnx2x_warpcore_clear_regs(phy, params, lane);
4155 bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
4156 } else {
4157 switch (serdes_net_if) {
4158 case PORT_HW_CFG_NET_SERDES_IF_KR:
4159 /* Enable KR Auto Neg */
4160 if (params->loopback_mode == LOOPBACK_NONE)
4161 bnx2x_warpcore_enable_AN_KR(phy, params, vars);
4162 else {
4163 DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
4164 bnx2x_warpcore_set_10G_KR(phy, params, vars);
4165 }
4166 break;
4167
4168 case PORT_HW_CFG_NET_SERDES_IF_XFI:
4169 bnx2x_warpcore_clear_regs(phy, params, lane);
4170 if (vars->line_speed == SPEED_10000) {
4171 DP(NETIF_MSG_LINK, "Setting 10G XFI\n");
4172 bnx2x_warpcore_set_10G_XFI(phy, params, 1);
4173 } else {
4174 if (SINGLE_MEDIA_DIRECT(params)) {
4175 DP(NETIF_MSG_LINK, "1G Fiber\n");
4176 fiber_mode = 1;
4177 } else {
4178 DP(NETIF_MSG_LINK, "10/100/1G SGMII\n");
4179 fiber_mode = 0;
4180 }
4181 bnx2x_warpcore_set_sgmii_speed(phy,
4182 params,
4183 fiber_mode);
4184 }
4185
4186 break;
4187
4188 case PORT_HW_CFG_NET_SERDES_IF_SFI:
4189
4190 bnx2x_warpcore_clear_regs(phy, params, lane);
4191 if (vars->line_speed == SPEED_10000) {
4192 DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
4193 bnx2x_warpcore_set_10G_XFI(phy, params, 0);
4194 } else if (vars->line_speed == SPEED_1000) {
4195 DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
4196 bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
4197 }
4198 /* Issue Module detection */
4199 if (bnx2x_is_sfp_module_plugged(phy, params))
4200 bnx2x_sfp_module_detection(phy, params);
4201 break;
4202
4203 case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
4204 if (vars->line_speed != SPEED_20000) {
4205 DP(NETIF_MSG_LINK, "Speed not supported yet\n");
4206 return;
4207 }
4208 DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n");
4209 bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane);
4210 /* Issue Module detection */
4211
4212 bnx2x_sfp_module_detection(phy, params);
4213 break;
4214
4215 case PORT_HW_CFG_NET_SERDES_IF_KR2:
4216 if (vars->line_speed != SPEED_20000) {
4217 DP(NETIF_MSG_LINK, "Speed not supported yet\n");
4218 return;
4219 }
4220 DP(NETIF_MSG_LINK, "Setting 20G KR2\n");
4221 bnx2x_warpcore_set_20G_KR2(bp, phy);
4222 break;
4223
4224 default:
4225 DP(NETIF_MSG_LINK, "Unsupported Serdes Net Interface "
4226 "0x%x\n", serdes_net_if);
4227 return;
4228 }
4229 }
4230
4231 /* Take lane out of reset after configuration is finished */
4232 bnx2x_warpcore_reset_lane(bp, phy, 0);
4233 DP(NETIF_MSG_LINK, "Exit config init\n");
4234}
4235
4236static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
4237 struct bnx2x_phy *phy,
4238 u8 tx_en)
4239{
4240 struct bnx2x *bp = params->bp;
4241 u32 cfg_pin;
4242 u8 port = params->port;
4243
4244 cfg_pin = REG_RD(bp, params->shmem_base +
4245 offsetof(struct shmem_region,
4246 dev_info.port_hw_config[port].e3_sfp_ctrl)) &
4247 PORT_HW_CFG_TX_LASER_MASK;
4248 /* Set the !tx_en since this pin is DISABLE_TX_LASER */
4249 DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
4250 /* For 20G, the expected pin to be used is 3 pins after the current */
4251
4252 bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
4253 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
4254 bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
4255}
4256
4257static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
4258 struct link_params *params)
4259{
4260 struct bnx2x *bp = params->bp;
4261 u16 val16;
4262 bnx2x_sfp_e3_set_transmitter(params, phy, 0);
4263 bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
4264 bnx2x_set_aer_mmd(params, phy);
4265 /* Global register */
4266 bnx2x_warpcore_reset_lane(bp, phy, 1);
4267
4268 /* Clear loopback settings (if any) */
4269 /* 10G & 20G */
4270 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4271 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
4272 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4273 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 &
4274 0xBFFF);
4275
4276 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4277 MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
4278 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4279 MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe);
4280
4281 /* Update those 1-copy registers */
4282 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
4283 MDIO_AER_BLOCK_AER_REG, 0);
4284 /* Enable 1G MDIO (1-copy) */
4285 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4286 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
4287 &val16);
4288 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4289 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
4290 val16 & ~0x10);
4291
4292 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4293 MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
4294 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4295 MDIO_WC_REG_XGXSBLK1_LANECTRL2,
4296 val16 & 0xff00);
4297
4298}
4299
4300static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
4301 struct link_params *params)
4302{
4303 struct bnx2x *bp = params->bp;
4304 u16 val16;
4305 u32 lane;
4306 DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
4307 params->loopback_mode, phy->req_line_speed);
4308
4309 if (phy->req_line_speed < SPEED_10000) {
4310 /* 10/100/1000 */
4311
4312 /* Update those 1-copy registers */
4313 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
4314 MDIO_AER_BLOCK_AER_REG, 0);
4315 /* Enable 1G MDIO (1-copy) */
4316 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4317 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
4318 &val16);
4319 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4320 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
4321 val16 | 0x10);
4322 /* Set 1G loopback based on lane (1-copy) */
4323 lane = bnx2x_get_warpcore_lane(phy, params);
4324 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4325 MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
4326 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4327 MDIO_WC_REG_XGXSBLK1_LANECTRL2,
4328 val16 | (1<<lane));
4329
4330 /* Switch back to 4-copy registers */
4331 bnx2x_set_aer_mmd(params, phy);
4332 /* Global loopback, not recommended. */
4333 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4334 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
4335 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4336 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
4337 0x4000);
4338 } else {
4339 /* 10G & 20G */
4340 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4341 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
4342 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4343 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
4344 0x4000);
4345
4346 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4347 MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
4348 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4349 MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
4350 }
4351}
4352
4353
4354void bnx2x_link_status_update(struct link_params *params,
4355 struct link_vars *vars)
4356{
4357 struct bnx2x *bp = params->bp;
4358 u8 link_10g_plus;
4359 u8 port = params->port;
4360 u32 sync_offset, media_types;
4361 /* Update PHY configuration */
4362 set_phy_vars(params, vars);
4363
4364 vars->link_status = REG_RD(bp, params->shmem_base +
4365 offsetof(struct shmem_region,
4366 port_mb[port].link_status));
4367
4368 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
4369 vars->phy_flags = PHY_XGXS_FLAG;
4370 if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
4371 vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
4372
4373 if (vars->link_up) {
4374 DP(NETIF_MSG_LINK, "phy link up\n");
4375
4376 vars->phy_link_up = 1;
4377 vars->duplex = DUPLEX_FULL;
4378 switch (vars->link_status &
4379 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
4380 case LINK_10THD:
4381 vars->duplex = DUPLEX_HALF;
4382 /* fall thru */
4383 case LINK_10TFD:
4384 vars->line_speed = SPEED_10;
4385 break;
4386
4387 case LINK_100TXHD:
4388 vars->duplex = DUPLEX_HALF;
4389 /* fall thru */
4390 case LINK_100T4:
4391 case LINK_100TXFD:
4392 vars->line_speed = SPEED_100;
4393 break;
4394
4395 case LINK_1000THD:
4396 vars->duplex = DUPLEX_HALF;
4397 /* fall thru */
4398 case LINK_1000TFD:
4399 vars->line_speed = SPEED_1000;
4400 break;
4401
4402 case LINK_2500THD:
4403 vars->duplex = DUPLEX_HALF;
4404 /* fall thru */
4405 case LINK_2500TFD:
4406 vars->line_speed = SPEED_2500;
4407 break;
4408
4409 case LINK_10GTFD:
4410 vars->line_speed = SPEED_10000;
4411 break;
4412 case LINK_20GTFD:
4413 vars->line_speed = SPEED_20000;
4414 break;
4415 default:
4416 break;
4417 }
4418 vars->flow_ctrl = 0;
4419 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
4420 vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
4421
4422 if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
4423 vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
4424
4425 if (!vars->flow_ctrl)
4426 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
4427
4428 if (vars->line_speed &&
4429 ((vars->line_speed == SPEED_10) ||
4430 (vars->line_speed == SPEED_100))) {
4431 vars->phy_flags |= PHY_SGMII_FLAG;
4432 } else {
4433 vars->phy_flags &= ~PHY_SGMII_FLAG;
4434 }
4435 if (vars->line_speed &&
4436 USES_WARPCORE(bp) &&
4437 (vars->line_speed == SPEED_1000))
4438 vars->phy_flags |= PHY_SGMII_FLAG;
4439		/* Anything 10G and over uses the bmac (xmac on warpcore) */
4440 link_10g_plus = (vars->line_speed >= SPEED_10000);
4441
4442 if (link_10g_plus) {
4443 if (USES_WARPCORE(bp))
4444 vars->mac_type = MAC_TYPE_XMAC;
4445 else
4446 vars->mac_type = MAC_TYPE_BMAC;
4447 } else {
4448 if (USES_WARPCORE(bp))
4449 vars->mac_type = MAC_TYPE_UMAC;
4450 else
4451 vars->mac_type = MAC_TYPE_EMAC;
4452 }
4453 } else { /* link down */
4454 DP(NETIF_MSG_LINK, "phy link down\n");
4455
4456 vars->phy_link_up = 0;
4457
4458 vars->line_speed = 0;
4459 vars->duplex = DUPLEX_FULL;
4460 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
4461
4462 /* indicate no mac active */
4463 vars->mac_type = MAC_TYPE_NONE;
4464 if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
4465 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
4466 }
4467
4468 /* Sync media type */
4469 sync_offset = params->shmem_base +
4470 offsetof(struct shmem_region,
4471 dev_info.port_hw_config[port].media_type);
4472 media_types = REG_RD(bp, sync_offset);
4473
4474 params->phy[INT_PHY].media_type =
4475 (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
4476 PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
4477 params->phy[EXT_PHY1].media_type =
4478 (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
4479 PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT;
4480 params->phy[EXT_PHY2].media_type =
4481 (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
4482 PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
4483 DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types);
4484
4485 /* Sync AEU offset */
4486 sync_offset = params->shmem_base +
4487 offsetof(struct shmem_region,
4488 dev_info.port_hw_config[port].aeu_int_mask);
4489
4490 vars->aeu_int_mask = REG_RD(bp, sync_offset);
4491
4492 /* Sync PFC status */
4493 if (vars->link_status & LINK_STATUS_PFC_ENABLED)
4494 params->feature_config_flags |=
4495 FEATURE_CONFIG_PFC_ENABLED;
4496 else
4497 params->feature_config_flags &=
4498 ~FEATURE_CONFIG_PFC_ENABLED;
4499
4500 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n",
4501 vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
4502 DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
4503 vars->line_speed, vars->duplex, vars->flow_ctrl);
4504}
4505
4506
4507static void bnx2x_set_master_ln(struct link_params *params,
4508 struct bnx2x_phy *phy)
4509{
4510 struct bnx2x *bp = params->bp;
4511 u16 new_master_ln, ser_lane;
4512 ser_lane = ((params->lane_config &
4513 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
4514 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
4515
4516 /* set the master_ln for AN */
4517 CL22_RD_OVER_CL45(bp, phy,
4518 MDIO_REG_BANK_XGXS_BLOCK2,
4519 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
4520 &new_master_ln);
4521
4522 CL22_WR_OVER_CL45(bp, phy,
4523 MDIO_REG_BANK_XGXS_BLOCK2 ,
4524 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
4525 (new_master_ln | ser_lane));
4526}
4527
4528static int bnx2x_reset_unicore(struct link_params *params,
4529 struct bnx2x_phy *phy,
4530 u8 set_serdes)
4531{
4532 struct bnx2x *bp = params->bp;
4533 u16 mii_control;
4534 u16 i;
4535 CL22_RD_OVER_CL45(bp, phy,
4536 MDIO_REG_BANK_COMBO_IEEE0,
4537 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
4538
4539 /* reset the unicore */
4540 CL22_WR_OVER_CL45(bp, phy,
4541 MDIO_REG_BANK_COMBO_IEEE0,
4542 MDIO_COMBO_IEEE0_MII_CONTROL,
4543 (mii_control |
4544 MDIO_COMBO_IEEO_MII_CONTROL_RESET));
4545 if (set_serdes)
4546 bnx2x_set_serdes_access(bp, params->port);
4547
4548 /* wait for the reset to self clear */
4549 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
4550 udelay(5);
4551
4552 /* the reset erased the previous bank value */
4553 CL22_RD_OVER_CL45(bp, phy,
4554 MDIO_REG_BANK_COMBO_IEEE0,
4555 MDIO_COMBO_IEEE0_MII_CONTROL,
4556 &mii_control);
4557
4558 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
4559 udelay(5);
4560 return 0;
4561 }
4562 }
4563
4564 netdev_err(bp->dev,
4565 "Warning: PHY was not initialized, Port %d\n",
4566 params->port);
4567 DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
4568 return -EINVAL;
4569
4570}
4571
4572static void bnx2x_set_swap_lanes(struct link_params *params,
4573 struct bnx2x_phy *phy)
4574{
4575 struct bnx2x *bp = params->bp;
4576 /*
4577 * Each two bits represents a lane number:
4578 * no swap is 0123 => 0x1b, so there is no need to enable the swap
4579 */
4580 u16 ser_lane, rx_lane_swap, tx_lane_swap;
4581
4582 ser_lane = ((params->lane_config &
4583 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
4584 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
4585 rx_lane_swap = ((params->lane_config &
4586 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
4587 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
4588 tx_lane_swap = ((params->lane_config &
4589 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
4590 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
4591
4592 if (rx_lane_swap != 0x1b) {
4593 CL22_WR_OVER_CL45(bp, phy,
4594 MDIO_REG_BANK_XGXS_BLOCK2,
4595 MDIO_XGXS_BLOCK2_RX_LN_SWAP,
4596 (rx_lane_swap |
4597 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
4598 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
4599 } else {
4600 CL22_WR_OVER_CL45(bp, phy,
4601 MDIO_REG_BANK_XGXS_BLOCK2,
4602 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
4603 }
4604
4605 if (tx_lane_swap != 0x1b) {
4606 CL22_WR_OVER_CL45(bp, phy,
4607 MDIO_REG_BANK_XGXS_BLOCK2,
4608 MDIO_XGXS_BLOCK2_TX_LN_SWAP,
4609 (tx_lane_swap |
4610 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
4611 } else {
4612 CL22_WR_OVER_CL45(bp, phy,
4613 MDIO_REG_BANK_XGXS_BLOCK2,
4614 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
4615 }
4616}
4617
4618static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
4619 struct link_params *params)
4620{
4621 struct bnx2x *bp = params->bp;
4622 u16 control2;
4623 CL22_RD_OVER_CL45(bp, phy,
4624 MDIO_REG_BANK_SERDES_DIGITAL,
4625 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
4626 &control2);
4627 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
4628 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
4629 else
4630 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
4631 DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
4632 phy->speed_cap_mask, control2);
4633 CL22_WR_OVER_CL45(bp, phy,
4634 MDIO_REG_BANK_SERDES_DIGITAL,
4635 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
4636 control2);
4637
4638 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
4639 (phy->speed_cap_mask &
4640 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
4641 DP(NETIF_MSG_LINK, "XGXS\n");
4642
4643 CL22_WR_OVER_CL45(bp, phy,
4644 MDIO_REG_BANK_10G_PARALLEL_DETECT,
4645 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
4646 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
4647
4648 CL22_RD_OVER_CL45(bp, phy,
4649 MDIO_REG_BANK_10G_PARALLEL_DETECT,
4650 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
4651 &control2);
4652
4654 control2 |=
4655 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
4656
4657 CL22_WR_OVER_CL45(bp, phy,
4658 MDIO_REG_BANK_10G_PARALLEL_DETECT,
4659 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
4660 control2);
4661
4662 /* Disable parallel detection of HiG */
4663 CL22_WR_OVER_CL45(bp, phy,
4664 MDIO_REG_BANK_XGXS_BLOCK2,
4665 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
4666 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
4667 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
4668 }
4669}
4670
4671static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
4672 struct link_params *params,
4673 struct link_vars *vars,
4674 u8 enable_cl73)
4675{
4676 struct bnx2x *bp = params->bp;
4677 u16 reg_val;
4678
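/*
 * Editorial roadmap: the blocks below touch, in order, the CL37
 * MII control, the 1000X autodetect bits, the BAM/TetonII
 * next-page control and, when enable_cl73 is set, the CL73
 * userB0 FSM/BAM setup and CL73 speed advertisement, before
 * finally writing the CL73 AN-enable bit.
 */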
4679 /* CL37 Autoneg */
4680 CL22_RD_OVER_CL45(bp, phy,
4681 MDIO_REG_BANK_COMBO_IEEE0,
4682 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
4683
4684 /* CL37 Autoneg Enabled */
4685 if (vars->line_speed == SPEED_AUTO_NEG)
4686 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
4687 else /* CL37 Autoneg Disabled */
4688 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
4689 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
4690
4691 CL22_WR_OVER_CL45(bp, phy,
4692 MDIO_REG_BANK_COMBO_IEEE0,
4693 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
4694
4695 /* Enable/Disable Autodetection */
4696
4697 CL22_RD_OVER_CL45(bp, phy,
4698 MDIO_REG_BANK_SERDES_DIGITAL,
4699 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
4700 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
4701 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
4702 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
4703 if (vars->line_speed == SPEED_AUTO_NEG)
4704 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
4705 else
4706 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
4707
4708 CL22_WR_OVER_CL45(bp, phy,
4709 MDIO_REG_BANK_SERDES_DIGITAL,
4710 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
4711
4712 /* Enable TetonII and BAM autoneg */
4713 CL22_RD_OVER_CL45(bp, phy,
4714 MDIO_REG_BANK_BAM_NEXT_PAGE,
4715 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
4716 &reg_val);
4717 if (vars->line_speed == SPEED_AUTO_NEG) {
4718 /* Enable BAM aneg Mode and TetonII aneg Mode */
4719 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
4720 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
4721 } else {
4722 /* TetonII and BAM Autoneg Disabled */
4723 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
4724 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
4725 }
4726 CL22_WR_OVER_CL45(bp, phy,
4727 MDIO_REG_BANK_BAM_NEXT_PAGE,
4728 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
4729 reg_val);
4730
4731 if (enable_cl73) {
4732 /* Enable Cl73 FSM status bits */
4733 CL22_WR_OVER_CL45(bp, phy,
4734 MDIO_REG_BANK_CL73_USERB0,
4735 MDIO_CL73_USERB0_CL73_UCTRL,
4736 0xe);
4737
4738 /* Enable BAM Station Manager */
4739 CL22_WR_OVER_CL45(bp, phy,
4740 MDIO_REG_BANK_CL73_USERB0,
4741 MDIO_CL73_USERB0_CL73_BAM_CTRL1,
4742 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
4743 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
4744 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
4745
4746 /* Advertise CL73 link speeds */
4747 CL22_RD_OVER_CL45(bp, phy,
4748 MDIO_REG_BANK_CL73_IEEEB1,
4749 MDIO_CL73_IEEEB1_AN_ADV2,
4750 &reg_val);
4751 if (phy->speed_cap_mask &
4752 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
4753 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
4754 if (phy->speed_cap_mask &
4755 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
4756 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
4757
4758 CL22_WR_OVER_CL45(bp, phy,
4759 MDIO_REG_BANK_CL73_IEEEB1,
4760 MDIO_CL73_IEEEB1_AN_ADV2,
4761 reg_val);
4762
4763 /* CL73 Autoneg Enabled */
4764 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
4765
4766 } else /* CL73 Autoneg Disabled */
4767 reg_val = 0;
4768
4769 CL22_WR_OVER_CL45(bp, phy,
4770 MDIO_REG_BANK_CL73_IEEEB0,
4771 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
4772}
4773
4774/* program SerDes, forced speed */
4775static void bnx2x_program_serdes(struct bnx2x_phy *phy,
4776 struct link_params *params,
4777 struct link_vars *vars)
4778{
4779 struct bnx2x *bp = params->bp;
4780 u16 reg_val;
4781
4782 /* program duplex, disable autoneg and sgmii */
4783 CL22_RD_OVER_CL45(bp, phy,
4784 MDIO_REG_BANK_COMBO_IEEE0,
4785 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
4786 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
4787 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
4788 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
4789 if (phy->req_duplex == DUPLEX_FULL)
4790 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
4791 CL22_WR_OVER_CL45(bp, phy,
4792 MDIO_REG_BANK_COMBO_IEEE0,
4793 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
4794
4795 /*
4796 * program speed
4797 * - needed only if the speed is greater than 1G (2.5G or 10G)
4798 */
4799 CL22_RD_OVER_CL45(bp, phy,
4800 MDIO_REG_BANK_SERDES_DIGITAL,
4801 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
4802 /* clearing the speed value before setting the right speed */
4803 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
4804
4805 reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
4806 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
4807
4808 if (!((vars->line_speed == SPEED_1000) ||
4809 (vars->line_speed == SPEED_100) ||
4810 (vars->line_speed == SPEED_10))) {
4811
4812 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
4813 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
4814 if (vars->line_speed == SPEED_10000)
4815 reg_val |=
4816 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
4817 }
4818
4819 CL22_WR_OVER_CL45(bp, phy,
4820 MDIO_REG_BANK_SERDES_DIGITAL,
4821 MDIO_SERDES_DIGITAL_MISC1, reg_val);
4822
4823}
4824
4825static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
4826 struct link_params *params)
4827{
4828 struct bnx2x *bp = params->bp;
4829 u16 val = 0;
4830
4831 /* configure the 48 bits for BAM AN */
4832
4833 /* set extended capabilities */
4834 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
4835 val |= MDIO_OVER_1G_UP1_2_5G;
4836 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
4837 val |= MDIO_OVER_1G_UP1_10G;
4838 CL22_WR_OVER_CL45(bp, phy,
4839 MDIO_REG_BANK_OVER_1G,
4840 MDIO_OVER_1G_UP1, val);
4841
4842 CL22_WR_OVER_CL45(bp, phy,
4843 MDIO_REG_BANK_OVER_1G,
4844 MDIO_OVER_1G_UP3, 0x400);
4845}
4846
4847static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
4848 struct link_params *params,
4849 u16 ieee_fc)
4850{
4851 struct bnx2x *bp = params->bp;
4852 u16 val;
4853 /* for AN, we are always publishing full duplex */
4854
4855 CL22_WR_OVER_CL45(bp, phy,
4856 MDIO_REG_BANK_COMBO_IEEE0,
4857 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
4858 CL22_RD_OVER_CL45(bp, phy,
4859 MDIO_REG_BANK_CL73_IEEEB1,
4860 MDIO_CL73_IEEEB1_AN_ADV1, &val);
4861 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
4862 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
4863 CL22_WR_OVER_CL45(bp, phy,
4864 MDIO_REG_BANK_CL73_IEEEB1,
4865 MDIO_CL73_IEEEB1_AN_ADV1, val);
4866}
4867
4868static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
4869 struct link_params *params,
4870 u8 enable_cl73)
4871{
4872 struct bnx2x *bp = params->bp;
4873 u16 mii_control;
4874
4875 DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n");
4876 /* Enable and restart BAM/CL37 aneg */
4877
4878 if (enable_cl73) {
4879 CL22_RD_OVER_CL45(bp, phy,
4880 MDIO_REG_BANK_CL73_IEEEB0,
4881 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
4882 &mii_control);
4883
4884 CL22_WR_OVER_CL45(bp, phy,
4885 MDIO_REG_BANK_CL73_IEEEB0,
4886 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
4887 (mii_control |
4888 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
4889 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
4890 } else {
4891
4892 CL22_RD_OVER_CL45(bp, phy,
4893 MDIO_REG_BANK_COMBO_IEEE0,
4894 MDIO_COMBO_IEEE0_MII_CONTROL,
4895 &mii_control);
4896 DP(NETIF_MSG_LINK,
4897 "bnx2x_restart_autoneg mii_control before = 0x%x\n",
4898 mii_control);
4899 CL22_WR_OVER_CL45(bp, phy,
4900 MDIO_REG_BANK_COMBO_IEEE0,
4901 MDIO_COMBO_IEEE0_MII_CONTROL,
4902 (mii_control |
4903 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
4904 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
4905 }
4906}
4907
4908static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
4909 struct link_params *params,
4910 struct link_vars *vars)
4911{
4912 struct bnx2x *bp = params->bp;
4913 u16 control1;
4914
4915 /* in SGMII mode, the unicore is always slave */
4916
4917 CL22_RD_OVER_CL45(bp, phy,
4918 MDIO_REG_BANK_SERDES_DIGITAL,
4919 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
4920 &control1);
4921 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
4922 /* set sgmii mode (and not fiber) */
4923 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
4924 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
4925 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
4926 CL22_WR_OVER_CL45(bp, phy,
4927 MDIO_REG_BANK_SERDES_DIGITAL,
4928 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
4929 control1);
4930
4931 /* if forced speed */
4932 if (!(vars->line_speed == SPEED_AUTO_NEG)) {
4933 /* set speed, disable autoneg */
4934 u16 mii_control;
4935
4936 CL22_RD_OVER_CL45(bp, phy,
4937 MDIO_REG_BANK_COMBO_IEEE0,
4938 MDIO_COMBO_IEEE0_MII_CONTROL,
4939 &mii_control);
4940 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
4941 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
4942 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
4943
4944 switch (vars->line_speed) {
4945 case SPEED_100:
4946 mii_control |=
4947 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
4948 break;
4949 case SPEED_1000:
4950 mii_control |=
4951 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
4952 break;
4953 case SPEED_10:
4954 /* there is nothing to set for 10M */
4955 break;
4956 default:
4957 /* invalid speed for SGMII */
4958 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
4959 vars->line_speed);
4960 break;
4961 }
4962
4963 /* setting the full duplex */
4964 if (phy->req_duplex == DUPLEX_FULL)
4965 mii_control |=
4966 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
4967 CL22_WR_OVER_CL45(bp, phy,
4968 MDIO_REG_BANK_COMBO_IEEE0,
4969 MDIO_COMBO_IEEE0_MII_CONTROL,
4970 mii_control);
4971
4972 } else { /* AN mode */
4973 /* enable and restart AN */
4974 bnx2x_restart_autoneg(phy, params, 0);
4975 }
4976}
4977
4978
4979/*
4980 * link management
4981 */
4982
4983static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
4984 struct link_params *params)
4985{
4986 struct bnx2x *bp = params->bp;
4987 u16 pd_10g, status2_1000x;
4988 if (phy->req_line_speed != SPEED_AUTO_NEG)
4989 return 0;
4990 CL22_RD_OVER_CL45(bp, phy,
4991 MDIO_REG_BANK_SERDES_DIGITAL,
4992 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
4993 &status2_1000x);
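/* Editorial note: STATUS2 is read a second time below, presumably
 * because its bits are latched; the first read flushes the stale
 * latched value so the second returns the current state.
 */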
4994 CL22_RD_OVER_CL45(bp, phy,
4995 MDIO_REG_BANK_SERDES_DIGITAL,
4996 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
4997 &status2_1000x);
4998 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
4999 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
5000 params->port);
5001 return 1;
5002 }
5003
5004 CL22_RD_OVER_CL45(bp, phy,
5005 MDIO_REG_BANK_10G_PARALLEL_DETECT,
5006 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
5007 &pd_10g);
5008
5009 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
5010 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
5011 params->port);
5012 return 1;
5013 }
5014 return 0;
5015}
5016
5017static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
5018 struct link_params *params,
5019 struct link_vars *vars,
5020 u32 gp_status)
5021{
5022 struct bnx2x *bp = params->bp;
5023 u16 ld_pause; /* local driver */
5024 u16 lp_pause; /* link partner */
5025 u16 pause_result;
5026
5027 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
5028
5029 /* resolve from gp_status in case of AN complete and not sgmii */
5030 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
5031 vars->flow_ctrl = phy->req_flow_ctrl;
5032 else if (phy->req_line_speed != SPEED_AUTO_NEG)
5033 vars->flow_ctrl = params->req_fc_auto_adv;
5034 else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
5035 (!(vars->phy_flags & PHY_SGMII_FLAG))) {
5036 if (bnx2x_direct_parallel_detect_used(phy, params)) {
5037 vars->flow_ctrl = params->req_fc_auto_adv;
5038 return;
5039 }
5040 if ((gp_status &
5041 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
5042 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
5043 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
5044 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
5045
5046 CL22_RD_OVER_CL45(bp, phy,
5047 MDIO_REG_BANK_CL73_IEEEB1,
5048 MDIO_CL73_IEEEB1_AN_ADV1,
5049 &ld_pause);
5050 CL22_RD_OVER_CL45(bp, phy,
5051 MDIO_REG_BANK_CL73_IEEEB1,
5052 MDIO_CL73_IEEEB1_AN_LP_ADV1,
5053 &lp_pause);
5054 pause_result = (ld_pause &
5055 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
5056 >> 8;
5057 pause_result |= (lp_pause &
5058 MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
5059 >> 10;
5060 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
5061 pause_result);
5062 } else {
5063 CL22_RD_OVER_CL45(bp, phy,
5064 MDIO_REG_BANK_COMBO_IEEE0,
5065 MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
5066 &ld_pause);
5067 CL22_RD_OVER_CL45(bp, phy,
5068 MDIO_REG_BANK_COMBO_IEEE0,
5069 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
5070 &lp_pause);
5071 pause_result = (ld_pause &
5072 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
5073 pause_result |= (lp_pause &
5074 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
5075 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
5076 pause_result);
5077 }
5078 bnx2x_pause_resolve(vars, pause_result);
5079 }
5080 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
5081}
5082
5083static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
5084 struct link_params *params)
5085{
5086 struct bnx2x *bp = params->bp;
5087 u16 rx_status, ustat_val, cl37_fsm_received;
5088 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
5089 /* Step 1: Make sure signal is detected */
5090 CL22_RD_OVER_CL45(bp, phy,
5091 MDIO_REG_BANK_RX0,
5092 MDIO_RX0_RX_STATUS,
5093 &rx_status);
5094 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
5095 (MDIO_RX0_RX_STATUS_SIGDET)) {
4596 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73. "
5097 "rx_status(0x80b0) = 0x%x\n", rx_status);
5098 CL22_WR_OVER_CL45(bp, phy,
5099 MDIO_REG_BANK_CL73_IEEEB0,
5100 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
5101 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
5102 return;
5103 }
5104 /* Step 2: Check CL73 state machine */
5105 CL22_RD_OVER_CL45(bp, phy,
5106 MDIO_REG_BANK_CL73_USERB0,
5107 MDIO_CL73_USERB0_CL73_USTAT1,
5108 &ustat_val);
5109 if ((ustat_val &
5110 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
5111 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
5112 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
5113 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
5114 DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. "
5115 "ustat_val(0x8371) = 0x%x\n", ustat_val);
5116 return;
5117 }
5118 /*
5119 * Step 3: Check CL37 Message Pages received to indicate LP
5120 * supports only CL37
5121 */
5122 CL22_RD_OVER_CL45(bp, phy,
5123 MDIO_REG_BANK_REMOTE_PHY,
5124 MDIO_REMOTE_PHY_MISC_RX_STATUS,
5125 &cl37_fsm_received);
5126 if ((cl37_fsm_received &
5127 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
5128 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
5129 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
5130 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
5131 DP(NETIF_MSG_LINK, "No CL37 FSM messages were received. "
5132 "misc_rx_status(0x8330) = 0x%x\n",
5133 cl37_fsm_received);
5134 return;
5135 }
5136 /*
5137 * The combined cl37/cl73 fsm state information indicates that
5138 * we are connected to a device which does not support cl73, but
5139 * does support cl37 BAM. In this case we disable cl73 and
5140 * restart cl37 auto-neg
5141 */
5142
5143 /* Disable CL73 */
5144 CL22_WR_OVER_CL45(bp, phy,
5145 MDIO_REG_BANK_CL73_IEEEB0,
5146 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
5147 0);
5148 /* Restart CL37 autoneg */
5149 bnx2x_restart_autoneg(phy, params, 0);
5150 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
5151}
5152
5153static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
5154 struct link_params *params,
5155 struct link_vars *vars,
5156 u32 gp_status)
5157{
5158 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
5159 vars->link_status |=
5160 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
5161
5162 if (bnx2x_direct_parallel_detect_used(phy, params))
5163 vars->link_status |=
5164 LINK_STATUS_PARALLEL_DETECTION_USED;
5165}
5166static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
5167 struct link_params *params,
5168 struct link_vars *vars,
5169 u16 is_link_up,
5170 u16 speed_mask,
5171 u16 is_duplex)
5172{
5173 struct bnx2x *bp = params->bp;
5174 if (phy->req_line_speed == SPEED_AUTO_NEG)
5175 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
5176 if (is_link_up) {
5177 DP(NETIF_MSG_LINK, "phy link up\n");
5178
5179 vars->phy_link_up = 1;
5180 vars->link_status |= LINK_STATUS_LINK_UP;
5181
5182 switch (speed_mask) {
5183 case GP_STATUS_10M:
5184 vars->line_speed = SPEED_10;
5185 if (vars->duplex == DUPLEX_FULL)
5186 vars->link_status |= LINK_10TFD;
5187 else
5188 vars->link_status |= LINK_10THD;
5189 break;
5190
5191 case GP_STATUS_100M:
5192 vars->line_speed = SPEED_100;
5193 if (vars->duplex == DUPLEX_FULL)
5194 vars->link_status |= LINK_100TXFD;
5195 else
5196 vars->link_status |= LINK_100TXHD;
5197 break;
5198
5199 case GP_STATUS_1G:
5200 case GP_STATUS_1G_KX:
5201 vars->line_speed = SPEED_1000;
5202 if (vars->duplex == DUPLEX_FULL)
5203 vars->link_status |= LINK_1000TFD;
5204 else
5205 vars->link_status |= LINK_1000THD;
5206 break;
5207
5208 case GP_STATUS_2_5G:
5209 vars->line_speed = SPEED_2500;
5210 if (vars->duplex == DUPLEX_FULL)
5211 vars->link_status |= LINK_2500TFD;
5212 else
5213 vars->link_status |= LINK_2500THD;
5214 break;
5215
5216 case GP_STATUS_5G:
5217 case GP_STATUS_6G:
5218 DP(NETIF_MSG_LINK,
5219 "link speed unsupported gp_status 0x%x\n",
5220 speed_mask);
5221 return -EINVAL;
5222
5223 case GP_STATUS_10G_KX4:
5224 case GP_STATUS_10G_HIG:
5225 case GP_STATUS_10G_CX4:
5226 case GP_STATUS_10G_KR:
5227 case GP_STATUS_10G_SFI:
5228 case GP_STATUS_10G_XFI:
5229 vars->line_speed = SPEED_10000;
5230 vars->link_status |= LINK_10GTFD;
5231 break;
5232 case GP_STATUS_20G_DXGXS:
5233 vars->line_speed = SPEED_20000;
5234 vars->link_status |= LINK_20GTFD;
5235 break;
5236 default:
5237 DP(NETIF_MSG_LINK,
5238 "link speed unsupported gp_status 0x%x\n",
5239 speed_mask);
5240 return -EINVAL;
5241 }
5242 } else { /* link_down */
5243 DP(NETIF_MSG_LINK, "phy link down\n");
5244
5245 vars->phy_link_up = 0;
5246
5247 vars->duplex = DUPLEX_FULL;
5248 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
5249 vars->mac_type = MAC_TYPE_NONE;
5250 }
5251 DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n",
5252 vars->phy_link_up, vars->line_speed);
5253 return 0;
5254}
5255
5256static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
5257 struct link_params *params,
5258 struct link_vars *vars)
5259{
5261 struct bnx2x *bp = params->bp;
5262
5263 u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
5264 int rc = 0;
5265
5266 /* Read gp_status */
5267 CL22_RD_OVER_CL45(bp, phy,
5268 MDIO_REG_BANK_GP_STATUS,
5269 MDIO_GP_STATUS_TOP_AN_STATUS1,
5270 &gp_status);
5271 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
5272 duplex = DUPLEX_FULL;
5273 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
5274 link_up = 1;
5275 speed_mask = gp_status & GP_STATUS_SPEED_MASK;
5276 DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n",
5277 gp_status, link_up, speed_mask);
5278 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
5279 duplex);
5280 if (rc == -EINVAL)
5281 return rc;
5282
5283 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
5284 if (SINGLE_MEDIA_DIRECT(params)) {
5285 bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
5286 if (phy->req_line_speed == SPEED_AUTO_NEG)
5287 bnx2x_xgxs_an_resolve(phy, params, vars,
5288 gp_status);
5289 }
5290 } else { /* link_down */
5291 if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
5292 SINGLE_MEDIA_DIRECT(params)) {
5293 /* Check signal is detected */
5294 bnx2x_check_fallback_to_cl37(phy, params);
5295 }
5296 }
5297
5298 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
5299 vars->duplex, vars->flow_ctrl, vars->link_status);
5300 return rc;
5301}
5302
5303static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
5304 struct link_params *params,
5305 struct link_vars *vars)
5306{
5308 struct bnx2x *bp = params->bp;
5309
5310 u8 lane;
5311 u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
5312 int rc = 0;
5313 lane = bnx2x_get_warpcore_lane(phy, params);
5314 /* Read gp_status */
5315 if (phy->req_line_speed > SPEED_10000) {
5316 u16 temp_link_up;
5317 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
5318 1, &temp_link_up);
5319 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
5320 1, &link_up);
5321 DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n",
5322 temp_link_up, link_up);
5323 link_up &= (1<<2);
5324 if (link_up)
5325 bnx2x_ext_phy_resolve_fc(phy, params, vars);
5326 } else {
5327 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
5328 MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1);
5329 DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
5330 /* Check for either KR or generic link up. */
5331 gp_status1 = ((gp_status1 >> 8) & 0xf) |
5332 ((gp_status1 >> 12) & 0xf);
5333 link_up = gp_status1 & (1 << lane);
5334 if (link_up && SINGLE_MEDIA_DIRECT(params)) {
5335 u16 pd, gp_status4;
5336 if (phy->req_line_speed == SPEED_AUTO_NEG) {
5337 /* Check Autoneg complete */
5338 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
5339 MDIO_WC_REG_GP2_STATUS_GP_2_4,
5340 &gp_status4);
5341 if (gp_status4 & ((1<<12)<<lane))
5342 vars->link_status |=
5343 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
5344
5345 /* Check parallel detect used */
5346 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
5347 MDIO_WC_REG_PAR_DET_10G_STATUS,
5348 &pd);
5349 if (pd & (1<<15))
5350 vars->link_status |=
5351 LINK_STATUS_PARALLEL_DETECTION_USED;
5352 }
5353 bnx2x_ext_phy_resolve_fc(phy, params, vars);
5354 }
5355 }
5356
5357 if (lane < 2) {
5358 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
5359 MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
5360 } else {
5361 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
5362 MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed);
5363 }
5364 DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed);
5365
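/* Editorial note: GP_2_2/GP_2_3 each appear to pack two lanes' speed
 * fields, one per byte; even lanes sit in the low byte, so they are
 * shifted up before the 0x3f00 speed field is masked out and handed
 * to bnx2x_get_link_speed_duplex() below.
 */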
5366 if ((lane & 1) == 0)
5367 gp_speed <<= 8;
5368 gp_speed &= 0x3f00;
5369
5371 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
5372 duplex);
5373
5374 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
5375 vars->duplex, vars->flow_ctrl, vars->link_status);
5376 return rc;
5377}
5378static void bnx2x_set_gmii_tx_driver(struct link_params *params)
5379{
5380 struct bnx2x *bp = params->bp;
5381 struct bnx2x_phy *phy = &params->phy[INT_PHY];
5382 u16 lp_up2;
5383 u16 tx_driver;
5384 u16 bank;
5385
5386 /* read precomp */
5387 CL22_RD_OVER_CL45(bp, phy,
5388 MDIO_REG_BANK_OVER_1G,
5389 MDIO_OVER_1G_LP_UP2, &lp_up2);
5390
5391 /* bits [10:7] at lp_up2, positioned at [15:12] */
5392 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
5393 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
5394 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
5395
5396 if (lp_up2 == 0)
5397 return;
5398
5399 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
5400 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
5401 CL22_RD_OVER_CL45(bp, phy,
5402 bank,
5403 MDIO_TX0_TX_DRIVER, &tx_driver);
5404
5405 /* replace tx_driver bits [15:12] */
5406 if (lp_up2 !=
5407 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
5408 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
5409 tx_driver |= lp_up2;
5410 CL22_WR_OVER_CL45(bp, phy,
5411 bank,
5412 MDIO_TX0_TX_DRIVER, tx_driver);
5413 }
5414 }
5415}
5416
5417static int bnx2x_emac_program(struct link_params *params,
5418 struct link_vars *vars)
5419{
5420 struct bnx2x *bp = params->bp;
5421 u8 port = params->port;
5422 u16 mode = 0;
5423
5424 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
5425 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
5426 EMAC_REG_EMAC_MODE,
5427 (EMAC_MODE_25G_MODE |
5428 EMAC_MODE_PORT_MII_10M |
5429 EMAC_MODE_HALF_DUPLEX));
5430 switch (vars->line_speed) {
5431 case SPEED_10:
5432 mode |= EMAC_MODE_PORT_MII_10M;
5433 break;
5434
5435 case SPEED_100:
5436 mode |= EMAC_MODE_PORT_MII;
5437 break;
5438
5439 case SPEED_1000:
5440 mode |= EMAC_MODE_PORT_GMII;
5441 break;
5442
5443 case SPEED_2500:
5444 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
5445 break;
5446
5447 default:
5448 /* 10G not valid for EMAC */
5449 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
5450 vars->line_speed);
5451 return -EINVAL;
5452 }
5453
5454 if (vars->duplex == DUPLEX_HALF)
5455 mode |= EMAC_MODE_HALF_DUPLEX;
5456 bnx2x_bits_en(bp,
5457 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
5458 mode);
5459
5460 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
5461 return 0;
5462}
5463
5464static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
5465 struct link_params *params)
5466{
5468 u16 bank, i = 0;
5469 struct bnx2x *bp = params->bp;
5470
5471 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
5472 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
5473 CL22_WR_OVER_CL45(bp, phy,
5474 bank,
5475 MDIO_RX0_RX_EQ_BOOST,
5476 phy->rx_preemphasis[i]);
5477 }
5478
5479 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
5480 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
5481 CL22_WR_OVER_CL45(bp, phy,
5482 bank,
5483 MDIO_TX0_TX_DRIVER,
5484 phy->tx_preemphasis[i]);
5485 }
5486}
5487
5488static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
5489 struct link_params *params,
5490 struct link_vars *vars)
5491{
5492 struct bnx2x *bp = params->bp;
5493 u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
5494 (params->loopback_mode == LOOPBACK_XGXS));
5495 if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
5496 if (SINGLE_MEDIA_DIRECT(params) &&
5497 (params->feature_config_flags &
5498 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
5499 bnx2x_set_preemphasis(phy, params);
5500
5501 /* forced speed requested? */
5502 if (vars->line_speed != SPEED_AUTO_NEG ||
5503 (SINGLE_MEDIA_DIRECT(params) &&
5504 params->loopback_mode == LOOPBACK_EXT)) {
5505 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
5506
5507 /* disable autoneg */
5508 bnx2x_set_autoneg(phy, params, vars, 0);
5509
5510 /* program speed and duplex */
5511 bnx2x_program_serdes(phy, params, vars);
5512
5513 } else { /* AN_mode */
5514 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
5515
5516 /* AN enabled */
5517 bnx2x_set_brcm_cl37_advertisement(phy, params);
5518
5519 /* program duplex & pause advertisement (for aneg) */
5520 bnx2x_set_ieee_aneg_advertisement(phy, params,
5521 vars->ieee_fc);
5522
5523 /* enable autoneg */
5524 bnx2x_set_autoneg(phy, params, vars, enable_cl73);
5525
5526 /* enable and restart AN */
5527 bnx2x_restart_autoneg(phy, params, enable_cl73);
5528 }
5529
5530 } else { /* SGMII mode */
5531 DP(NETIF_MSG_LINK, "SGMII\n");
5532
5533 bnx2x_initialize_sgmii_process(phy, params, vars);
5534 }
5535}
5536
5537static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
5538 struct link_params *params,
5539 struct link_vars *vars)
5540{
5541 int rc;
5542 vars->phy_flags |= PHY_XGXS_FLAG;
5543 if ((phy->req_line_speed &&
5544 ((phy->req_line_speed == SPEED_100) ||
5545 (phy->req_line_speed == SPEED_10))) ||
5546 (!phy->req_line_speed &&
5547 (phy->speed_cap_mask >=
5548 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
5549 (phy->speed_cap_mask <
5550 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
5551 (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD))
5552 vars->phy_flags |= PHY_SGMII_FLAG;
5553 else
5554 vars->phy_flags &= ~PHY_SGMII_FLAG;
5555
5556 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
5557 bnx2x_set_aer_mmd(params, phy);
5558 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
5559 bnx2x_set_master_ln(params, phy);
5560
5561 rc = bnx2x_reset_unicore(params, phy, 0);
5562 /* reset the SerDes and wait for reset bit return low */
5563 if (rc != 0)
5564 return rc;
5565
5566 bnx2x_set_aer_mmd(params, phy);
5567 /* setting the masterLn_def again after the reset */
5568 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
5569 bnx2x_set_master_ln(params, phy);
5570 bnx2x_set_swap_lanes(params, phy);
5571 }
5572
5573 return rc;
5574}
5575
5576static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
5577 struct bnx2x_phy *phy,
5578 struct link_params *params)
5579{
5580 u16 cnt, ctrl;
5581 /* Wait for soft reset to get cleared up to 1 sec */
5582 for (cnt = 0; cnt < 1000; cnt++) {
5583 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
5584 bnx2x_cl22_read(bp, phy,
5585 MDIO_PMA_REG_CTRL, &ctrl);
5586 else
5587 bnx2x_cl45_read(bp, phy,
5588 MDIO_PMA_DEVAD,
5589 MDIO_PMA_REG_CTRL, &ctrl);
5590 if (!(ctrl & (1<<15)))
5591 break;
5592 msleep(1);
5593 }
5594
5595 if (cnt == 1000)
5596 netdev_err(bp->dev,
5597 "Warning: PHY was not initialized, Port %d\n",
5598 params->port);
5599 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
5600 return cnt;
5601}
5602
5603static void bnx2x_link_int_enable(struct link_params *params)
5604{
5605 u8 port = params->port;
5606 u32 mask;
5607 struct bnx2x *bp = params->bp;
5608
5609 /* Setting the status to report on link up for either XGXS or SerDes */
5610 if (CHIP_IS_E3(bp)) {
5611 mask = NIG_MASK_XGXS0_LINK_STATUS;
5612 if (!(SINGLE_MEDIA_DIRECT(params)))
5613 mask |= NIG_MASK_MI_INT;
5614 } else if (params->switch_cfg == SWITCH_CFG_10G) {
5615 mask = (NIG_MASK_XGXS0_LINK10G |
5616 NIG_MASK_XGXS0_LINK_STATUS);
5617 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
5618 if (!(SINGLE_MEDIA_DIRECT(params)) &&
5619 params->phy[INT_PHY].type !=
5620 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
5621 mask |= NIG_MASK_MI_INT;
5622 DP(NETIF_MSG_LINK, "enabled external phy int\n");
5623 }
5624
5625 } else { /* SerDes */
5626 mask = NIG_MASK_SERDES0_LINK_STATUS;
5627 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
5628 if (!(SINGLE_MEDIA_DIRECT(params)) &&
5629 params->phy[INT_PHY].type !=
5630 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
5631 mask |= NIG_MASK_MI_INT;
5632 DP(NETIF_MSG_LINK, "enabled external phy int\n");
5633 }
5634 }
5635 bnx2x_bits_en(bp,
5636 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
5637 mask);
5638
5639 DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
5640 (params->switch_cfg == SWITCH_CFG_10G),
5641 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
5642 DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
5643 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
5644 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
5645 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
5646 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
5647 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
5648 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
5649}
5650
5651static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
5652 u8 exp_mi_int)
5653{
5654 u32 latch_status = 0;
5655
5656 /*
5657 * Disable the MI INT (external phy int) by writing 1 to the
5658 * status register. The link down indication is an active-high
5659 * signal, so we need to write the status back to clear the XOR
5660 */
5661 /* Read Latched signals */
5662 latch_status = REG_RD(bp,
5663 NIG_REG_LATCH_STATUS_0 + port*8);
5664 DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status);
5665 /* Handle only those with latched-signal=up. */
5666 if (exp_mi_int)
5667 bnx2x_bits_en(bp,
5668 NIG_REG_STATUS_INTERRUPT_PORT0
5669 + port*4,
5670 NIG_STATUS_EMAC0_MI_INT);
5671 else
5672 bnx2x_bits_dis(bp,
5673 NIG_REG_STATUS_INTERRUPT_PORT0
5674 + port*4,
5675 NIG_STATUS_EMAC0_MI_INT);
5676
5677 if (latch_status & 1) {
5678
5679 /* For all latched-signal=up : Re-Arm Latch signals */
5680 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
5681 (latch_status & 0xfffe) | (latch_status & 1));
5682 }
5683 /* For all latched-signal=up, write original_signal to status */
5684}
5685
5686static void bnx2x_link_int_ack(struct link_params *params,
5687 struct link_vars *vars, u8 is_10g_plus)
5688{
5689 struct bnx2x *bp = params->bp;
5690 u8 port = params->port;
5691 u32 mask;
5692 /*
5693 * First reset all status bits; we assume only one line will
5694 * change at a time
5695 */
5696 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5697 (NIG_STATUS_XGXS0_LINK10G |
5698 NIG_STATUS_XGXS0_LINK_STATUS |
5699 NIG_STATUS_SERDES0_LINK_STATUS));
5700 if (vars->phy_link_up) {
5701 if (USES_WARPCORE(bp))
5702 mask = NIG_STATUS_XGXS0_LINK_STATUS;
5703 else {
5704 if (is_10g_plus)
5705 mask = NIG_STATUS_XGXS0_LINK10G;
5706 else if (params->switch_cfg == SWITCH_CFG_10G) {
5707 /*
5708 * Disable the link interrupt by writing 1 to
5709 * the relevant lane in the status register
5710 */
5711 u32 ser_lane =
5712 ((params->lane_config &
5713 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
5714 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
5715 mask = ((1 << ser_lane) <<
5716 NIG_STATUS_XGXS0_LINK_STATUS_SIZE);
5717 } else
5718 mask = NIG_STATUS_SERDES0_LINK_STATUS;
5719 }
5720 DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n",
5721 mask);
5722 bnx2x_bits_en(bp,
5723 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5724 mask);
5725 }
5726}
5727
5728static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
5729{
5730 u8 *str_ptr = str;
5731 u32 mask = 0xf0000000;
5732 u8 shift = 8*4;
5733 u8 digit;
5734 u8 remove_leading_zeros = 1;
5735 if (*len < 10) {
4736 /* Need at least 10 chars for this format */
5737 *str_ptr = '\0';
5738 (*len)--;
5739 return -EINVAL;
5740 }
5741 while (shift > 0) {
5742
5743 shift -= 4;
5744 digit = ((num & mask) >> shift);
5745 if (digit == 0 && remove_leading_zeros) {
5746 mask = mask >> 4;
5747 continue;
5748 } else if (digit < 0xa)
5749 *str_ptr = digit + '0';
5750 else
5751 *str_ptr = digit - 0xa + 'a';
5752 remove_leading_zeros = 0;
5753 str_ptr++;
5754 (*len)--;
5755 mask = mask >> 4;
5756 if (shift == 4*4) {
5757 *str_ptr = '.';
5758 str_ptr++;
5759 (*len)--;
5760 remove_leading_zeros = 1;
5761 }
5762 }
5763 return 0;
5764}
5765
5767static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
5768{
5769 str[0] = '\0';
5770 (*len)--;
5771 return 0;
5772}
5773
5774int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
5775 u8 *version, u16 len)
5776{
5777 struct bnx2x *bp;
5778 u32 spirom_ver = 0;
5779 int status = 0;
5780 u8 *ver_p = version;
5781 u16 remain_len = len;
5782 if (version == NULL || params == NULL)
5783 return -EINVAL;
5784 bp = params->bp;
5785
5786 /* Extract first external phy */
5787 version[0] = '\0';
5788 spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr);
5789
5790 if (params->phy[EXT_PHY1].format_fw_ver) {
5791 status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver,
5792 ver_p,
5793 &remain_len);
5794 ver_p += (len - remain_len);
5795 }
5796 if ((params->num_phys == MAX_PHYS) &&
5797 (params->phy[EXT_PHY2].ver_addr != 0)) {
5798 spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
5799 if (params->phy[EXT_PHY2].format_fw_ver) {
5800 *ver_p = '/';
5801 ver_p++;
5802 remain_len--;
5803 status |= params->phy[EXT_PHY2].format_fw_ver(
5804 spirom_ver,
5805 ver_p,
5806 &remain_len);
5807 ver_p = version + (len - remain_len);
5808 }
5809 }
5810 *ver_p = '\0';
5811 return status;
5812}
5813
5814static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
5815 struct link_params *params)
5816{
5817 u8 port = params->port;
5818 struct bnx2x *bp = params->bp;
5819
5820 if (phy->req_line_speed != SPEED_1000) {
5821 u32 md_devad = 0;
5822
5823 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
5824
5825 if (!CHIP_IS_E3(bp)) {
5826 /* change the uni_phy_addr in the nig */
5827 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
5828 port*0x18));
5829
5830 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
5831 0x5);
5832 }
5833
5834 bnx2x_cl45_write(bp, phy,
5835 5,
5836 (MDIO_REG_BANK_AER_BLOCK +
5837 (MDIO_AER_BLOCK_AER_REG & 0xf)),
5838 0x2800);
5839
5840 bnx2x_cl45_write(bp, phy,
5841 5,
5842 (MDIO_REG_BANK_CL73_IEEEB0 +
5843 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
5844 0x6041);
5845 msleep(200);
5846 /* set aer mmd back */
5847 bnx2x_set_aer_mmd(params, phy);
5848
5849 if (!CHIP_IS_E3(bp)) {
5850 /* and md_devad */
5851 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
5852 md_devad);
5853 }
5854 } else {
5855 u16 mii_ctrl;
5856 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
5857 bnx2x_cl45_read(bp, phy, 5,
5858 (MDIO_REG_BANK_COMBO_IEEE0 +
5859 (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
5860 &mii_ctrl);
5861 bnx2x_cl45_write(bp, phy, 5,
5862 (MDIO_REG_BANK_COMBO_IEEE0 +
5863 (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
5864 mii_ctrl |
5865 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
5866 }
5867}
5868
5869int bnx2x_set_led(struct link_params *params,
5870 struct link_vars *vars, u8 mode, u32 speed)
5871{
5872 u8 port = params->port;
5873 u16 hw_led_mode = params->hw_led_mode;
5874 int rc = 0;
5875 u8 phy_idx;
5876 u32 tmp;
5877 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5878 struct bnx2x *bp = params->bp;
5879 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
5880 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
5881 speed, hw_led_mode);
5882 /* Give each attached PHY a chance to drive its own link LED */
5883 for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) {
5884 if (params->phy[phy_idx].set_link_led) {
5885 params->phy[phy_idx].set_link_led(
5886 &params->phy[phy_idx], params, mode);
5887 }
5888 }
5889
5890 switch (mode) {
5891 case LED_MODE_FRONT_PANEL_OFF:
5892 case LED_MODE_OFF:
5893 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
5894 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
5895 SHARED_HW_CFG_LED_MAC1);
5896
5897 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
5898 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
5899 break;
5900
5901 case LED_MODE_OPER:
5902 /*
5903 * For all other phys, OPER mode is the same as ON, so in case
5904 * link is down, do nothing; otherwise fall through
5905 */
5906 if (!vars->link_up)
5907 break;
5908 case LED_MODE_ON:
5909 if (((params->phy[EXT_PHY1].type ==
5910 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5911 (params->phy[EXT_PHY1].type ==
5912 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
5913 CHIP_IS_E2(bp) && params->num_phys == 2) {
5914 /*
5915 * This is a work-around for E2+8727 Configurations
5916 */
5917 if (mode == LED_MODE_ON ||
5918 speed == SPEED_10000){
5919 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
5920 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
5921
5922 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
5923 EMAC_WR(bp, EMAC_REG_EMAC_LED,
5924 (tmp | EMAC_LED_OVERRIDE));
5925 /*
5926 * return here without enabling traffic
5927 * LED blink and setting rate in ON mode.
5928 * In oper mode, enabling LED blink
5929 * and setting rate is needed.
5930 */
5931 if (mode == LED_MODE_ON)
5932 return rc;
5933 }
5934 } else if (SINGLE_MEDIA_DIRECT(params)) {
5935 /*
5936 * This is a work-around for a HW issue found when link
5937 * is up in CL73
5938 */
5939 if ((!CHIP_IS_E3(bp)) ||
5940 (CHIP_IS_E3(bp) &&
5941 mode == LED_MODE_ON))
5942 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
5943
5944 if (CHIP_IS_E1x(bp) ||
5945 CHIP_IS_E2(bp) ||
5946 (mode == LED_MODE_ON))
5947 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
5948 else
5949 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
5950 hw_led_mode);
5951 } else
5952 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
5953
5954 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
5955 /* Set blinking rate to ~15.9Hz */
5956 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
5957 LED_BLINK_RATE_VAL);
5958 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
5959 port*4, 1);
5960 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
5961 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
5962
5963 if (CHIP_IS_E1(bp) &&
5964 ((speed == SPEED_2500) ||
5965 (speed == SPEED_1000) ||
5966 (speed == SPEED_100) ||
5967 (speed == SPEED_10))) {
5968 /*
5969 * On Everest 1 Ax chip versions, for speeds less than
5970 * 10G, the LED scheme is different
5971 */
5972 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
5973 + port*4, 1);
5974 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
5975 port*4, 0);
5976 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
5977 port*4, 1);
5978 }
5979 break;
5980
5981 default:
5982 rc = -EINVAL;
5983 DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
5984 mode);
5985 break;
5986 }
5987 return rc;
5988
5989}
5990
5991/*
5992 * This function reflects the actual link state, as read DIRECTLY
5993 * from the HW
5994 */
5995int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
5996 u8 is_serdes)
5997{
5998 struct bnx2x *bp = params->bp;
5999 u16 gp_status = 0, phy_index = 0;
6000 u8 ext_phy_link_up = 0, serdes_phy_type;
6001 struct link_vars temp_vars;
6002 struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
6003
6004 if (CHIP_IS_E3(bp)) {
6005 u16 link_up;
6006 if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)]
6007 > SPEED_10000) {
6008 /* Check 20G link */
6009 bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
6010 1, &link_up);
6011 bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
6012 1, &link_up);
6013 link_up &= (1<<2);
6014 } else {
6015 /* Check 10G link and below*/
6016 u8 lane = bnx2x_get_warpcore_lane(int_phy, params);
6017 bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
6018 MDIO_WC_REG_GP2_STATUS_GP_2_1,
6019 &gp_status);
6020 gp_status = ((gp_status >> 8) & 0xf) |
6021 ((gp_status >> 12) & 0xf);
6022 link_up = gp_status & (1 << lane);
6023 }
6024 if (!link_up)
6025 return -ESRCH;
6026 } else {
6027 CL22_RD_OVER_CL45(bp, int_phy,
6028 MDIO_REG_BANK_GP_STATUS,
6029 MDIO_GP_STATUS_TOP_AN_STATUS1,
6030 &gp_status);
6031 /* link is up only if both local phy and external phy are up */
6032 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
6033 return -ESRCH;
6034 }
6035 /* In XGXS loopback mode, do not check external PHY */
6036 if (params->loopback_mode == LOOPBACK_XGXS)
6037 return 0;
6038
6039 switch (params->num_phys) {
6040 case 1:
6041 /* No external PHY */
6042 return 0;
6043 case 2:
6044 ext_phy_link_up = params->phy[EXT_PHY1].read_status(
6045 &params->phy[EXT_PHY1],
6046 params, &temp_vars);
6047 break;
6048 case 3: /* Dual Media */
6049 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
6050 phy_index++) {
6051 serdes_phy_type = ((params->phy[phy_index].media_type ==
6052 ETH_PHY_SFP_FIBER) ||
6053 (params->phy[phy_index].media_type ==
6054 ETH_PHY_XFP_FIBER) ||
6055 (params->phy[phy_index].media_type ==
6056 ETH_PHY_DA_TWINAX));
6057
6058 if (is_serdes != serdes_phy_type)
6059 continue;
6060 if (params->phy[phy_index].read_status) {
6061 ext_phy_link_up |=
6062 params->phy[phy_index].read_status(
6063 &params->phy[phy_index],
6064 params, &temp_vars);
6065 }
6066 }
6067 break;
6068 }
6069 if (ext_phy_link_up)
6070 return 0;
6071 return -ESRCH;
6072}
6073
6074static int bnx2x_link_initialize(struct link_params *params,
6075 struct link_vars *vars)
6076{
6077 int rc = 0;
6078 u8 phy_index, non_ext_phy;
6079 struct bnx2x *bp = params->bp;
6080 /*
6081 * In case an external phy exists, the line speed is the line
6082 * speed linked up by the external phy. In case the board is direct
6083 * only, the line_speed during initialization will be
6084 * equal to the req_line_speed
6085 */
6086 vars->line_speed = params->phy[INT_PHY].req_line_speed;
6087
6088 /*
6089 * Initialize the internal phy in case this is a direct board
6090 * (no external phys), or this board has an external phy which
6091 * requires the internal phy to be initialized first.
6092 */
6093 if (!USES_WARPCORE(bp))
6094 bnx2x_prepare_xgxs(&params->phy[INT_PHY], params, vars);
6095 /* init ext phy and enable link state int */
6096 non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
6097 (params->loopback_mode == LOOPBACK_XGXS));
6098
6099 if (non_ext_phy ||
6100 (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
6101 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
6102 struct bnx2x_phy *phy = &params->phy[INT_PHY];
6103 if (vars->line_speed == SPEED_AUTO_NEG &&
6104 (CHIP_IS_E1x(bp) ||
6105 CHIP_IS_E2(bp)))
6106 bnx2x_set_parallel_detection(phy, params);
6107 if (params->phy[INT_PHY].config_init)
6108 params->phy[INT_PHY].config_init(phy,
6109 params,
6110 vars);
6111 }
6112
6113 /* Init external phy*/
6114 if (non_ext_phy) {
6115 if (params->phy[INT_PHY].supported &
6116 SUPPORTED_FIBRE)
6117 vars->link_status |= LINK_STATUS_SERDES_LINK;
6118 } else {
6119 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
6120 phy_index++) {
6121 /*
6122 * No need to initialize second phy in case of first
6123 * phy only selection. In case of second phy, we do
6124 * need to initialize the first phy, since they are
6125 * connected.
6126 */
6127 if (params->phy[phy_index].supported &
6128 SUPPORTED_FIBRE)
6129 vars->link_status |= LINK_STATUS_SERDES_LINK;
6130
6131 if (phy_index == EXT_PHY2 &&
6132 (bnx2x_phy_selection(params) ==
6133 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
6134 DP(NETIF_MSG_LINK, "Not initializing"
6135 " second phy\n");
6136 continue;
6137 }
6138 params->phy[phy_index].config_init(
6139 &params->phy[phy_index],
6140 params, vars);
6141 }
6142 }
6143 /* Reset the interrupt indication after phy was initialized */
6144 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
6145 params->port*4,
6146 (NIG_STATUS_XGXS0_LINK10G |
6147 NIG_STATUS_XGXS0_LINK_STATUS |
6148 NIG_STATUS_SERDES0_LINK_STATUS |
6149 NIG_MASK_MI_INT));
6150 bnx2x_update_mng(params, vars->link_status);
6151 return rc;
6152}
6153
6154static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
6155 struct link_params *params)
6156{
6157 /* reset the SerDes/XGXS */
6158 REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
6159 (0x1ff << (params->port*16)));
6160}
6161
6162static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
6163 struct link_params *params)
6164{
6165 struct bnx2x *bp = params->bp;
6166 u8 gpio_port;
6167 /* HW reset */
6168 if (CHIP_IS_E2(bp))
6169 gpio_port = BP_PATH(bp);
6170 else
6171 gpio_port = params->port;
6172 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6173 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6174 gpio_port);
6175 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6176 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6177 gpio_port);
6178 DP(NETIF_MSG_LINK, "reset external PHY\n");
6179}
6180
6181static int bnx2x_update_link_down(struct link_params *params,
6182 struct link_vars *vars)
6183{
6184 struct bnx2x *bp = params->bp;
6185 u8 port = params->port;
6186
6187 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
6188 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
6189 vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
6190 /* indicate no mac active */
6191 vars->mac_type = MAC_TYPE_NONE;
6192
6193 /* update shared memory */
6194 vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
6195 LINK_STATUS_LINK_UP |
6196 LINK_STATUS_PHYSICAL_LINK_FLAG |
6197 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
6198 LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
6199 LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
6200 LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK);
6201 vars->line_speed = 0;
6202 bnx2x_update_mng(params, vars->link_status);
6203
6204 /* activate nig drain */
6205 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
6206
6207 /* disable emac */
6208 if (!CHIP_IS_E3(bp))
6209 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6210
6211 msleep(10);
6212 /* reset BigMac/Xmac */
6213 if (CHIP_IS_E1x(bp) ||
6214 CHIP_IS_E2(bp)) {
6215 bnx2x_bmac_rx_disable(bp, params->port);
6216 REG_WR(bp, GRCBASE_MISC +
6217 MISC_REGISTERS_RESET_REG_2_CLEAR,
6218 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
6219 }
6220 if (CHIP_IS_E3(bp))
6221 bnx2x_xmac_disable(params);
6222
6223 return 0;
6224}
6225
6226static int bnx2x_update_link_up(struct link_params *params,
6227 struct link_vars *vars,
6228 u8 link_10g)
6229{
6230 struct bnx2x *bp = params->bp;
6231 u8 port = params->port;
6232 int rc = 0;
6233
6234 vars->link_status |= (LINK_STATUS_LINK_UP |
6235 LINK_STATUS_PHYSICAL_LINK_FLAG);
6236 vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
6237
6238 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
6239 vars->link_status |=
6240 LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
6241
6242 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
6243 vars->link_status |=
6244 LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
6245 if (USES_WARPCORE(bp)) {
6246 if (link_10g) {
6247 if (bnx2x_xmac_enable(params, vars, 0) ==
6248 -ESRCH) {
6249 DP(NETIF_MSG_LINK, "Found errors on XMAC\n");
6250 vars->link_up = 0;
6251 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
6252 vars->link_status &= ~LINK_STATUS_LINK_UP;
6253 }
6254 } else
6255 bnx2x_umac_enable(params, vars, 0);
6256 bnx2x_set_led(params, vars,
6257 LED_MODE_OPER, vars->line_speed);
6258 }
6259 if ((CHIP_IS_E1x(bp) ||
6260 CHIP_IS_E2(bp))) {
6261 if (link_10g) {
6262 if (bnx2x_bmac_enable(params, vars, 0) ==
6263 -ESRCH) {
6264 DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
6265 vars->link_up = 0;
6266 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
6267 vars->link_status &= ~LINK_STATUS_LINK_UP;
6268 }
6269
6270 bnx2x_set_led(params, vars,
6271 LED_MODE_OPER, SPEED_10000);
6272 } else {
6273 rc = bnx2x_emac_program(params, vars);
6274 bnx2x_emac_enable(params, vars, 0);
6275
6276 /* AN complete? */
6277 if ((vars->link_status &
6278 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
6279 && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
6280 SINGLE_MEDIA_DIRECT(params))
6281 bnx2x_set_gmii_tx_driver(params);
6282 }
6283 }
6284
6285 /* PBF - link up */
6286 if (CHIP_IS_E1x(bp))
6287 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
6288 vars->line_speed);
6289
6290 /* disable drain */
6291 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
6292
6293 /* update shared memory */
6294 bnx2x_update_mng(params, vars->link_status);
6295 msleep(20);
6296 return rc;
6297}
6298/*
6299 * The bnx2x_link_update function should be called upon link
6300 * interrupt.
6301 * Link is considered up as follows:
6302 * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
6303 * to be up
6304 * - SINGLE_MEDIA - The link between the 577xx and the external
6305 * phy (XGXS) needs to be up as well as the external link of the
6306 * phy (PHY_EXT1)
6307 * - DUAL_MEDIA - The link between the 577xx and the first
6308 * external phy needs to be up, and at least one of the 2
6309 * external phy links must be up.
6310 */
6311int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6312{
6313 struct bnx2x *bp = params->bp;
6314 struct link_vars phy_vars[MAX_PHYS];
6315 u8 port = params->port;
6316 u8 link_10g_plus, phy_index;
6317 u8 ext_phy_link_up = 0, cur_link_up;
6318 int rc = 0;
6319 u8 is_mi_int = 0;
6320 u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
6321 u8 active_external_phy = INT_PHY;
6322 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
6323 for (phy_index = INT_PHY; phy_index < params->num_phys;
6324 phy_index++) {
6325 phy_vars[phy_index].flow_ctrl = 0;
6326 phy_vars[phy_index].link_status = 0;
6327 phy_vars[phy_index].line_speed = 0;
6328 phy_vars[phy_index].duplex = DUPLEX_FULL;
6329 phy_vars[phy_index].phy_link_up = 0;
6330 phy_vars[phy_index].link_up = 0;
6331 phy_vars[phy_index].fault_detected = 0;
6332 }
6333
6334 if (USES_WARPCORE(bp))
6335 bnx2x_set_aer_mmd(params, &params->phy[INT_PHY]);
6336
6337 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
6338 port, (vars->phy_flags & PHY_XGXS_FLAG),
6339 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
6340
6341 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
6342 port*0x18) > 0);
6343 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
6344 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
6345 is_mi_int,
6346 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
6347
6348 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
6349 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
6350 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
6351
6352 /* disable emac */
6353 if (!CHIP_IS_E3(bp))
6354 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6355
6356 /*
6357 * Step 1:
6358 * Check external link change only for external phys, and apply
6359 * priority selection between them in case the link on both phys
6360 * is up. Note that instead of the common vars, a temporary
6361 * vars argument is used since each phy may have a different
6362 * link/speed/duplex result
6363 */
6364 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
6365 phy_index++) {
6366 struct bnx2x_phy *phy = &params->phy[phy_index];
6367 if (!phy->read_status)
6368 continue;
6369 /* Read link status and params of this ext phy */
6370 cur_link_up = phy->read_status(phy, params,
6371 &phy_vars[phy_index]);
6372 if (cur_link_up) {
6373 DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
6374 phy_index);
6375 } else {
6376 DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
6377 phy_index);
6378 continue;
6379 }
6380
6381 if (!ext_phy_link_up) {
6382 ext_phy_link_up = 1;
6383 active_external_phy = phy_index;
6384 } else {
6385 switch (bnx2x_phy_selection(params)) {
6386 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
6387 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
6388 /*
6389 * In this option, the first PHY makes sure to pass the
6390 * traffic through itself only.
6391 * It's not clear how to reset the link on the second phy
6392 */
6393 active_external_phy = EXT_PHY1;
6394 break;
6395 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
6396 /*
6397 * In this option, the first PHY makes sure to pass the
6398 * traffic through the second PHY.
6399 */
6400 active_external_phy = EXT_PHY2;
6401 break;
6402 default:
6403 /*
6404 * Link indication on both PHYs with the following cases
6405 * is invalid:
6406 * - FIRST_PHY means that second phy wasn't initialized,
6407 * hence its link is expected to be down
6408 * - SECOND_PHY means that first phy should not be able
6409 * to link up by itself (using configuration)
6410				 * - DEFAULT should be overridden during initialization
6411 */
6412				DP(NETIF_MSG_LINK, "Invalid link indication"
6413					   " mpc=0x%x. DISABLING LINK !!!\n",
6414 params->multi_phy_config);
6415 ext_phy_link_up = 0;
6416 break;
6417 }
6418 }
6419 }
6420 prev_line_speed = vars->line_speed;
6421 /*
6422 * Step 2:
6423 * Read the status of the internal phy. In case of
6424 * DIRECT_SINGLE_MEDIA board, this link is the external link,
6425 * otherwise this is the link between the 577xx and the first
6426 * external phy
6427 */
6428 if (params->phy[INT_PHY].read_status)
6429 params->phy[INT_PHY].read_status(
6430 &params->phy[INT_PHY],
6431 params, vars);
6432	/*
6433	 * The INT_PHY flow control resides in the vars. This includes the
6434	 * case where the speed or flow control are not set to AUTO.
6435	 * Otherwise, the active external phy flow control result is set
6436	 * to the vars. The ext_phy_line_speed is needed to check if the
6437	 * speed is different between the internal phy and external phy.
6438	 * This case may be the result of an intermediate link speed change.
6439	 */
6440 if (active_external_phy > INT_PHY) {
6441 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
6442 /*
6443 * Link speed is taken from the XGXS. AN and FC result from
6444 * the external phy.
6445 */
6446 vars->link_status |= phy_vars[active_external_phy].link_status;
6447
6448		/*
6449		 * If active_external_phy is the first PHY and link is up,
6450		 * disable TX on the second external PHY
6451		 */
6452 if (active_external_phy == EXT_PHY1) {
6453 if (params->phy[EXT_PHY2].phy_specific_func) {
6454 DP(NETIF_MSG_LINK, "Disabling TX on"
6455 " EXT_PHY2\n");
6456 params->phy[EXT_PHY2].phy_specific_func(
6457 &params->phy[EXT_PHY2],
6458 params, DISABLE_TX);
6459 }
6460 }
6461
6462 ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
6463 vars->duplex = phy_vars[active_external_phy].duplex;
6464 if (params->phy[active_external_phy].supported &
6465 SUPPORTED_FIBRE)
6466 vars->link_status |= LINK_STATUS_SERDES_LINK;
6467 else
6468 vars->link_status &= ~LINK_STATUS_SERDES_LINK;
6469 DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
6470 active_external_phy);
6471 }
6472
6473 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
6474 phy_index++) {
6475 if (params->phy[phy_index].flags &
6476 FLAGS_REARM_LATCH_SIGNAL) {
6477 bnx2x_rearm_latch_signal(bp, port,
6478 phy_index ==
6479 active_external_phy);
6480 break;
6481 }
6482 }
6483 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
6484 " ext_phy_line_speed = %d\n", vars->flow_ctrl,
6485 vars->link_status, ext_phy_line_speed);
6486	/*
6487	 * Upon link speed change, set the NIG into drain mode. This deals
6488	 * with a possible FIFO glitch due to the clock change when speed
6489	 * is decreased without a link-down indication
6490	 */
6491
6492 if (vars->phy_link_up) {
6493 if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
6494 (ext_phy_line_speed != vars->line_speed)) {
6495 DP(NETIF_MSG_LINK, "Internal link speed %d is"
6496 " different than the external"
6497 " link speed %d\n", vars->line_speed,
6498 ext_phy_line_speed);
6499 vars->phy_link_up = 0;
6500 } else if (prev_line_speed != vars->line_speed) {
6501 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
6502 0);
6503 msleep(1);
6504 }
6505 }
6506
6507	/* anything 10G and over uses the BMAC */
6508 link_10g_plus = (vars->line_speed >= SPEED_10000);
6509
6510 bnx2x_link_int_ack(params, vars, link_10g_plus);
6511
6512	/*
6513	 * In case the external phy link is up and the internal link is
6514	 * down (probably not initialized yet after link initialization),
6515	 * the internal phy needs to be initialized.
6516	 * Note that after a link down-up as a result of a cable plug, the
6517	 * xgxs link would probably become up again without the need to
6518	 * initialize it
6519	 */
6520 if (!(SINGLE_MEDIA_DIRECT(params))) {
6521 DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
6522 " init_preceding = %d\n", ext_phy_link_up,
6523 vars->phy_link_up,
6524 params->phy[EXT_PHY1].flags &
6525 FLAGS_INIT_XGXS_FIRST);
6526 if (!(params->phy[EXT_PHY1].flags &
6527 FLAGS_INIT_XGXS_FIRST)
6528 && ext_phy_link_up && !vars->phy_link_up) {
6529 vars->line_speed = ext_phy_line_speed;
6530 if (vars->line_speed < SPEED_1000)
6531 vars->phy_flags |= PHY_SGMII_FLAG;
6532 else
6533 vars->phy_flags &= ~PHY_SGMII_FLAG;
6534
6535 if (params->phy[INT_PHY].config_init)
6536 params->phy[INT_PHY].config_init(
6537 &params->phy[INT_PHY], params,
6538 vars);
6539 }
6540 }
6541 /*
6542 * Link is up only if both local phy and external phy (in case of
6543 * non-direct board) are up and no fault detected on active PHY.
6544 */
6545 vars->link_up = (vars->phy_link_up &&
6546 (ext_phy_link_up ||
6547 SINGLE_MEDIA_DIRECT(params)) &&
6548 (phy_vars[active_external_phy].fault_detected == 0));
6549
6550 if (vars->link_up)
6551 rc = bnx2x_update_link_up(params, vars, link_10g_plus);
6552 else
6553 rc = bnx2x_update_link_down(params, vars);
6554
6555 return rc;
6556}
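/*
 * Illustrative sketch (not part of the driver): a minimal example of how a
 * caller might invoke bnx2x_link_update() from the link-change interrupt
 * path. The lock helpers named here are hypothetical placeholders, and the
 * block is guarded out so the file still compiles as-is.
 */
#if 0
static void example_handle_link_irq(struct bnx2x *bp,
				    struct link_params *params,
				    struct link_vars *vars)
{
	int rc;

	example_acquire_phy_lock(bp);	/* hypothetical serialization helper */
	rc = bnx2x_link_update(params, vars);
	example_release_phy_lock(bp);
	if (rc)
		DP(NETIF_MSG_LINK, "link update failed, rc = %d\n", rc);
}
#endif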
6557
6558
6559/*****************************************************************************/
6560/* External Phy section */
6561/*****************************************************************************/
6562void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
6563{
6564 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6565 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6566 msleep(1);
6567 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6568 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
6569}
6570
6571static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
6572 u32 spirom_ver, u32 ver_addr)
6573{
6574 DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
6575 (u16)(spirom_ver>>16), (u16)spirom_ver, port);
6576
6577 if (ver_addr)
6578 REG_WR(bp, ver_addr, spirom_ver);
6579}
6580
6581static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
6582 struct bnx2x_phy *phy,
6583 u8 port)
6584{
6585 u16 fw_ver1, fw_ver2;
6586
6587 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
6588 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
6589 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
6590 MDIO_PMA_REG_ROM_VER2, &fw_ver2);
6591 bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
6592 phy->ver_addr);
6593}
6594
6595static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
6596 struct bnx2x_phy *phy,
6597 struct link_vars *vars)
6598{
6599 u16 val;
6600 bnx2x_cl45_read(bp, phy,
6601 MDIO_AN_DEVAD,
6602 MDIO_AN_REG_STATUS, &val);
6603 bnx2x_cl45_read(bp, phy,
6604 MDIO_AN_DEVAD,
6605 MDIO_AN_REG_STATUS, &val);
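	/*
	 * The AN status register is read twice above because it is
	 * latched; per the Clause 45 AN status layout, bit 5 indicates
	 * autoneg complete and bit 0 the link partner's autoneg ability
	 * (clear when parallel detection was used instead).
	 */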
6606 if (val & (1<<5))
6607 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
6608 if ((val & (1<<0)) == 0)
6609 vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
6610}
6611
6612/******************************************************************/
6613/* common BCM8073/BCM8727 PHY SECTION */
6614/******************************************************************/
6615static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
6616 struct link_params *params,
6617 struct link_vars *vars)
6618{
6619 struct bnx2x *bp = params->bp;
6620 if (phy->req_line_speed == SPEED_10 ||
6621 phy->req_line_speed == SPEED_100) {
6622 vars->flow_ctrl = phy->req_flow_ctrl;
6623 return;
6624 }
6625
6626 if (bnx2x_ext_phy_resolve_fc(phy, params, vars) &&
6627 (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) {
6628 u16 pause_result;
6629 u16 ld_pause; /* local */
6630 u16 lp_pause; /* link partner */
6631 bnx2x_cl45_read(bp, phy,
6632 MDIO_AN_DEVAD,
6633 MDIO_AN_REG_CL37_FC_LD, &ld_pause);
6634
6635 bnx2x_cl45_read(bp, phy,
6636 MDIO_AN_DEVAD,
6637 MDIO_AN_REG_CL37_FC_LP, &lp_pause);
6638 pause_result = (ld_pause &
6639 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
6640 pause_result |= (lp_pause &
6641 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
6642
6643 bnx2x_pause_resolve(vars, pause_result);
6644 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
6645 pause_result);
6646 }
6647}
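/*
 * Worked example for the CL37 pause resolution above, assuming the
 * PAUSE/ASM_DIR advertisement bits sit at positions 7 and 8 of the CL37 FC
 * registers (as the >>5 and >>7 shifts suggest): local PAUSE lands in bit 2
 * of pause_result and the link partner's PAUSE in bit 0. If both sides
 * advertise symmetric PAUSE, pause_result = 0x5, which bnx2x_pause_resolve()
 * maps to symmetric flow control per Table 28B-3.
 */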
6648static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
6649 struct bnx2x_phy *phy,
6650 u8 port)
6651{
6652 u32 count = 0;
6653 u16 fw_ver1, fw_msgout;
6654 int rc = 0;
6655
6656 /* Boot port from external ROM */
6657 /* EDC grst */
6658 bnx2x_cl45_write(bp, phy,
6659 MDIO_PMA_DEVAD,
6660 MDIO_PMA_REG_GEN_CTRL,
6661 0x0001);
6662
6663 /* ucode reboot and rst */
6664 bnx2x_cl45_write(bp, phy,
6665 MDIO_PMA_DEVAD,
6666 MDIO_PMA_REG_GEN_CTRL,
6667 0x008c);
6668
6669 bnx2x_cl45_write(bp, phy,
6670 MDIO_PMA_DEVAD,
6671 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
6672
6673 /* Reset internal microprocessor */
6674 bnx2x_cl45_write(bp, phy,
6675 MDIO_PMA_DEVAD,
6676 MDIO_PMA_REG_GEN_CTRL,
6677 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
6678
6679 /* Release srst bit */
6680 bnx2x_cl45_write(bp, phy,
6681 MDIO_PMA_DEVAD,
6682 MDIO_PMA_REG_GEN_CTRL,
6683 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
6684
6685 /* Delay 100ms per the PHY specifications */
6686 msleep(100);
6687
6688	/* The 8073 sometimes takes longer to download */
6689 do {
6690 count++;
6691 if (count > 300) {
6692 DP(NETIF_MSG_LINK,
6693 "bnx2x_8073_8727_external_rom_boot port %x:"
6694 "Download failed. fw version = 0x%x\n",
6695 port, fw_ver1);
6696 rc = -EINVAL;
6697 break;
6698 }
6699
6700 bnx2x_cl45_read(bp, phy,
6701 MDIO_PMA_DEVAD,
6702 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
6703 bnx2x_cl45_read(bp, phy,
6704 MDIO_PMA_DEVAD,
6705 MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
6706
6707 msleep(1);
6708 } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
6709 ((fw_msgout & 0xff) != 0x03 && (phy->type ==
6710 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
6711
6712 /* Clear ser_boot_ctl bit */
6713 bnx2x_cl45_write(bp, phy,
6714 MDIO_PMA_DEVAD,
6715 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
6716 bnx2x_save_bcm_spirom_ver(bp, phy, port);
6717
6718 DP(NETIF_MSG_LINK,
6719 "bnx2x_8073_8727_external_rom_boot port %x:"
6720 "Download complete. fw version = 0x%x\n",
6721 port, fw_ver1);
6722
6723 return rc;
6724}
6725
6726/******************************************************************/
6727/* BCM8073 PHY SECTION */
6728/******************************************************************/
6729static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
6730{
6731	/* This is required only for 8073 A1, version 102 */
6732 u16 val;
6733
6734	/* Read 8073 HW revision */
6735 bnx2x_cl45_read(bp, phy,
6736 MDIO_PMA_DEVAD,
6737 MDIO_PMA_REG_8073_CHIP_REV, &val);
6738
6739 if (val != 1) {
6740 /* No need to workaround in 8073 A1 */
6741 return 0;
6742 }
6743
6744 bnx2x_cl45_read(bp, phy,
6745 MDIO_PMA_DEVAD,
6746 MDIO_PMA_REG_ROM_VER2, &val);
6747
6748 /* SNR should be applied only for version 0x102 */
6749 if (val != 0x102)
6750 return 0;
6751
6752 return 1;
6753}
6754
6755static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
6756{
6757	u16 val, cnt, cnt1;
6758
6759 bnx2x_cl45_read(bp, phy,
6760 MDIO_PMA_DEVAD,
6761 MDIO_PMA_REG_8073_CHIP_REV, &val);
6762
6763 if (val > 0) {
6764 /* No need to workaround in 8073 A1 */
6765 return 0;
6766 }
6767 /* XAUI workaround in 8073 A0: */
6768
6769 /*
6770 * After loading the boot ROM and restarting Autoneg, poll
6771 * Dev1, Reg $C820:
6772 */
6773
6774 for (cnt = 0; cnt < 1000; cnt++) {
6775 bnx2x_cl45_read(bp, phy,
6776 MDIO_PMA_DEVAD,
6777 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
6778 &val);
6779 /*
6780 * If bit [14] = 0 or bit [13] = 0, continue on with
6781 * system initialization (XAUI work-around not required, as
6782 * these bits indicate 2.5G or 1G link up).
6783 */
6784 if (!(val & (1<<14)) || !(val & (1<<13))) {
6785 DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
6786 return 0;
6787 } else if (!(val & (1<<15))) {
6788 DP(NETIF_MSG_LINK, "bit 15 went off\n");
6789 /*
6790			 * If bit 15 is 0, then poll Dev1, Reg $C841 until its
6791 * MSB (bit15) goes to 1 (indicating that the XAUI
6792 * workaround has completed), then continue on with
6793 * system initialization.
6794 */
6795 for (cnt1 = 0; cnt1 < 1000; cnt1++) {
6796 bnx2x_cl45_read(bp, phy,
6797 MDIO_PMA_DEVAD,
6798 MDIO_PMA_REG_8073_XAUI_WA, &val);
6799 if (val & (1<<15)) {
6800 DP(NETIF_MSG_LINK,
6801 "XAUI workaround has completed\n");
6802 return 0;
6803 }
6804 msleep(3);
6805 }
6806 break;
6807 }
6808 msleep(3);
6809 }
6810 DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
6811 return -EINVAL;
6812}
6813
6814static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy)
6815{
6816 /* Force KR or KX */
6817 bnx2x_cl45_write(bp, phy,
6818 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
6819 bnx2x_cl45_write(bp, phy,
6820 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
6821 bnx2x_cl45_write(bp, phy,
6822 MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000);
6823 bnx2x_cl45_write(bp, phy,
6824 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
6825}
6826
6827static void bnx2x_8073_set_pause_cl37(struct link_params *params,
6828 struct bnx2x_phy *phy,
6829 struct link_vars *vars)
6830{
6831 u16 cl37_val;
6832 struct bnx2x *bp = params->bp;
6833 bnx2x_cl45_read(bp, phy,
6834 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val);
6835
6836 cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
6837 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
6838 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
6839 if ((vars->ieee_fc &
6840 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
6841 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
6842 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
6843 }
6844 if ((vars->ieee_fc &
6845 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
6846 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
6847 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
6848 }
6849 if ((vars->ieee_fc &
6850 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
6851 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
6852 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
6853 }
6854 DP(NETIF_MSG_LINK,
6855 "Ext phy AN advertize cl37 0x%x\n", cl37_val);
6856
6857 bnx2x_cl45_write(bp, phy,
6858 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
6859 msleep(500);
6860}
6861
6862static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
6863 struct link_params *params,
6864 struct link_vars *vars)
6865{
6866 struct bnx2x *bp = params->bp;
6867 u16 val = 0, tmp1;
6868 u8 gpio_port;
6869 DP(NETIF_MSG_LINK, "Init 8073\n");
6870
6871 if (CHIP_IS_E2(bp))
6872 gpio_port = BP_PATH(bp);
6873 else
6874 gpio_port = params->port;
6875 /* Restore normal power mode*/
6876 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6877 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
6878
6879 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6880 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
6881
6882 /* enable LASI */
6883 bnx2x_cl45_write(bp, phy,
6884 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
6885 bnx2x_cl45_write(bp, phy,
6886 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
6887
6888 bnx2x_8073_set_pause_cl37(params, phy, vars);
6889
6890 bnx2x_cl45_read(bp, phy,
6891 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
6892
6893 bnx2x_cl45_read(bp, phy,
6894 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
6895
6896 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
6897
6898 /* Swap polarity if required - Must be done only in non-1G mode */
6899 if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
6900 /* Configure the 8073 to swap _P and _N of the KR lines */
6901 DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
6902 /* 10G Rx/Tx and 1G Tx signal polarity swap */
6903 bnx2x_cl45_read(bp, phy,
6904 MDIO_PMA_DEVAD,
6905 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
6906 bnx2x_cl45_write(bp, phy,
6907 MDIO_PMA_DEVAD,
6908 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
6909 (val | (3<<9)));
6910 }
6911
6912
6913 /* Enable CL37 BAM */
6914 if (REG_RD(bp, params->shmem_base +
6915 offsetof(struct shmem_region, dev_info.
6916 port_hw_config[params->port].default_cfg)) &
6917 PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
6918
6919 bnx2x_cl45_read(bp, phy,
6920 MDIO_AN_DEVAD,
6921 MDIO_AN_REG_8073_BAM, &val);
6922 bnx2x_cl45_write(bp, phy,
6923 MDIO_AN_DEVAD,
6924 MDIO_AN_REG_8073_BAM, val | 1);
6925 DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
6926 }
6927 if (params->loopback_mode == LOOPBACK_EXT) {
6928 bnx2x_807x_force_10G(bp, phy);
6929 DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
6930 return 0;
6931 } else {
6932 bnx2x_cl45_write(bp, phy,
6933 MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
6934 }
6935 if (phy->req_line_speed != SPEED_AUTO_NEG) {
6936 if (phy->req_line_speed == SPEED_10000) {
6937 val = (1<<7);
6938 } else if (phy->req_line_speed == SPEED_2500) {
6939 val = (1<<5);
6940 /*
6941 * Note that 2.5G works only when used with 1G
6942 * advertisement
6943 */
6944 } else
6945 val = (1<<5);
6946 } else {
6947 val = 0;
6948 if (phy->speed_cap_mask &
6949 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
6950 val |= (1<<7);
6951
6952 /* Note that 2.5G works only when used with 1G advertisement */
6953 if (phy->speed_cap_mask &
6954 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
6955 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6956 val |= (1<<5);
6957 DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
6958 }
6959
6960 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
6961 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
6962
6963 if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
6964 (phy->req_line_speed == SPEED_AUTO_NEG)) ||
6965 (phy->req_line_speed == SPEED_2500)) {
6966 u16 phy_ver;
6967 /* Allow 2.5G for A1 and above */
6968 bnx2x_cl45_read(bp, phy,
6969 MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
6970 &phy_ver);
6971 DP(NETIF_MSG_LINK, "Add 2.5G\n");
6972 if (phy_ver > 0)
6973 tmp1 |= 1;
6974 else
6975 tmp1 &= 0xfffe;
6976 } else {
6977 DP(NETIF_MSG_LINK, "Disable 2.5G\n");
6978 tmp1 &= 0xfffe;
6979 }
6980
6981 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
6982 /* Add support for CL37 (passive mode) II */
6983
6984 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
6985 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
6986 (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ?
6987 0x20 : 0x40)));
6988
6989 /* Add support for CL37 (passive mode) III */
6990 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
6991
6992	/*
6993	 * The SNR will improve by about 2 dB by changing the BW and FFE
6994	 * main tap. The rest of the commands are executed after link is
6995	 * up. Change the FFE main cursor to 5 in the EDC register
6996	 */
6997 if (bnx2x_8073_is_snr_needed(bp, phy))
6998 bnx2x_cl45_write(bp, phy,
6999 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
7000 0xFB0C);
7001
7002	/* Enable FEC (Forward Error Correction) Request in the AN */
7003 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
7004 tmp1 |= (1<<15);
7005 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
7006
7007 bnx2x_ext_phy_set_pause(params, phy, vars);
7008
7009 /* Restart autoneg */
7010 msleep(500);
7011 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
7012 DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
7013 ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
7014 return 0;
7015}
7016
7017static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
7018 struct link_params *params,
7019 struct link_vars *vars)
7020{
7021 struct bnx2x *bp = params->bp;
7022 u8 link_up = 0;
7023 u16 val1, val2;
7024 u16 link_status = 0;
7025 u16 an1000_status = 0;
7026
7027 bnx2x_cl45_read(bp, phy,
7028 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
7029
7030	DP(NETIF_MSG_LINK, "8073 LASI status 0x%x\n", val1);
7031
7032 /* clear the interrupt LASI status register */
7033 bnx2x_cl45_read(bp, phy,
7034 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
7035 bnx2x_cl45_read(bp, phy,
7036 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
7037 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1);
7038 /* Clear MSG-OUT */
7039 bnx2x_cl45_read(bp, phy,
7040 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
7041
7042 /* Check the LASI */
7043 bnx2x_cl45_read(bp, phy,
7044 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
7045
7046 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
7047
7048 /* Check the link status */
7049 bnx2x_cl45_read(bp, phy,
7050 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
7051 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
7052
7053 bnx2x_cl45_read(bp, phy,
7054 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
7055 bnx2x_cl45_read(bp, phy,
7056 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
7057 link_up = ((val1 & 4) == 4);
7058 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
7059
7060 if (link_up &&
7061 ((phy->req_line_speed != SPEED_10000))) {
7062 if (bnx2x_8073_xaui_wa(bp, phy) != 0)
7063 return 0;
7064 }
7065 bnx2x_cl45_read(bp, phy,
7066 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
7067 bnx2x_cl45_read(bp, phy,
7068 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
7069
7070 /* Check the link status on 1.1.2 */
7071 bnx2x_cl45_read(bp, phy,
7072 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
7073 bnx2x_cl45_read(bp, phy,
7074 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
7075 DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
7076	   " an_link_status=0x%x\n", val2, val1, an1000_status);
7077
7078 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
7079 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
7080		/*
7081		 * The SNR will improve by about 2 dB by changing the BW and FFE
7082		 * main tap. The 1st write to change the FFE main tap is done
7083		 * before restarting AN. Change PLL Bandwidth in EDC register
7084		 */
7085 bnx2x_cl45_write(bp, phy,
7086 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
7087 0x26BC);
7088
7089 /* Change CDR Bandwidth in EDC register */
7090 bnx2x_cl45_write(bp, phy,
7091 MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH,
7092 0x0333);
7093 }
7094 bnx2x_cl45_read(bp, phy,
7095 MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
7096 &link_status);
7097
7098 /* Bits 0..2 --> speed detected, bits 13..15--> link is down */
7099 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
7100 link_up = 1;
7101 vars->line_speed = SPEED_10000;
7102 DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
7103 params->port);
7104 } else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) {
7105 link_up = 1;
7106 vars->line_speed = SPEED_2500;
7107 DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n",
7108 params->port);
7109 } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
7110 link_up = 1;
7111 vars->line_speed = SPEED_1000;
7112 DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
7113 params->port);
7114 } else {
7115 link_up = 0;
7116 DP(NETIF_MSG_LINK, "port %x: External link is down\n",
7117 params->port);
7118 }
7119
7120 if (link_up) {
7121 /* Swap polarity if required */
7122 if (params->lane_config &
7123 PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
7124 /* Configure the 8073 to swap P and N of the KR lines */
7125 bnx2x_cl45_read(bp, phy,
7126 MDIO_XS_DEVAD,
7127 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
7128 /*
7129 * Set bit 3 to invert Rx in 1G mode and clear this bit
7130			 * when it's in 10G mode.
7131 */
7132 if (vars->line_speed == SPEED_1000) {
7133 DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
7134					   " the 8073\n");
7135 val1 |= (1<<3);
7136 } else
7137 val1 &= ~(1<<3);
7138
7139 bnx2x_cl45_write(bp, phy,
7140 MDIO_XS_DEVAD,
7141 MDIO_XS_REG_8073_RX_CTRL_PCIE,
7142 val1);
7143 }
7144 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
7145 bnx2x_8073_resolve_fc(phy, params, vars);
7146 vars->duplex = DUPLEX_FULL;
7147 }
7148 return link_up;
7149}
7150
7151static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
7152 struct link_params *params)
7153{
7154 struct bnx2x *bp = params->bp;
7155 u8 gpio_port;
7156 if (CHIP_IS_E2(bp))
7157 gpio_port = BP_PATH(bp);
7158 else
7159 gpio_port = params->port;
7160 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
7161 gpio_port);
7162 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
7163 MISC_REGISTERS_GPIO_OUTPUT_LOW,
7164 gpio_port);
7165}
7166
7167/******************************************************************/
7168/* BCM8705 PHY SECTION */
7169/******************************************************************/
7170static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
7171 struct link_params *params,
7172 struct link_vars *vars)
7173{
7174 struct bnx2x *bp = params->bp;
7175 DP(NETIF_MSG_LINK, "init 8705\n");
7176 /* Restore normal power mode*/
7177 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
7178 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
7179 /* HW reset */
7180 bnx2x_ext_phy_hw_reset(bp, params->port);
7181 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
7182 bnx2x_wait_reset_complete(bp, phy, params);
7183
7184 bnx2x_cl45_write(bp, phy,
7185 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
7186 bnx2x_cl45_write(bp, phy,
7187 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
7188 bnx2x_cl45_write(bp, phy,
7189 MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
7190 bnx2x_cl45_write(bp, phy,
7191 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
7192 /* BCM8705 doesn't have microcode, hence the 0 */
7193 bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
7194 return 0;
7195}
7196
7197static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
7198 struct link_params *params,
7199 struct link_vars *vars)
7200{
7201 u8 link_up = 0;
7202 u16 val1, rx_sd;
7203 struct bnx2x *bp = params->bp;
7204 DP(NETIF_MSG_LINK, "read status 8705\n");
7205 bnx2x_cl45_read(bp, phy,
7206 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
7207 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
7208
7209 bnx2x_cl45_read(bp, phy,
7210 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
7211 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
7212
7213 bnx2x_cl45_read(bp, phy,
7214 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
7215
7216 bnx2x_cl45_read(bp, phy,
7217 MDIO_PMA_DEVAD, 0xc809, &val1);
7218 bnx2x_cl45_read(bp, phy,
7219 MDIO_PMA_DEVAD, 0xc809, &val1);
7220
7221 DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
7222 link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0));
7223 if (link_up) {
7224 vars->line_speed = SPEED_10000;
7225 bnx2x_ext_phy_resolve_fc(phy, params, vars);
7226 }
7227 return link_up;
7228}
7229
7230/******************************************************************/
7231/* SFP+ module Section */
7232/******************************************************************/
7233static void bnx2x_set_disable_pmd_transmit(struct link_params *params,
7234 struct bnx2x_phy *phy,
7235 u8 pmd_dis)
7236{
7237 struct bnx2x *bp = params->bp;
7238 /*
7239 * Disable transmitter only for bootcodes which can enable it afterwards
7240 * (for D3 link)
7241 */
7242 if (pmd_dis) {
7243 if (params->feature_config_flags &
7244 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED)
7245 DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n");
7246 else {
7247 DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n");
7248 return;
7249 }
7250 } else
7251 DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n");
7252 bnx2x_cl45_write(bp, phy,
7253 MDIO_PMA_DEVAD,
7254 MDIO_PMA_REG_TX_DISABLE, pmd_dis);
7255}
7256
7257static u8 bnx2x_get_gpio_port(struct link_params *params)
7258{
7259 u8 gpio_port;
7260 u32 swap_val, swap_override;
7261 struct bnx2x *bp = params->bp;
7262 if (CHIP_IS_E2(bp))
7263 gpio_port = BP_PATH(bp);
7264 else
7265 gpio_port = params->port;
7266 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7267 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7268 return gpio_port ^ (swap_val && swap_override);
7269}
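/*
 * Note on the return expression above: '&&' is a logical AND, so the XOR
 * operand is 1 only when both the port-swap strap and its override are set,
 * flipping the gpio port between 0 and 1. E.g. gpio_port = 1 with
 * swap_val = 1 and swap_override = 1 yields 1 ^ 1 = 0.
 */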
7270
7271static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params,
7272 struct bnx2x_phy *phy,
7273 u8 tx_en)
7274{
7275 u16 val;
7276 u8 port = params->port;
7277 struct bnx2x *bp = params->bp;
7278 u32 tx_en_mode;
7279
7280	/* Disable/Enable transmitter (TX laser of the SFP+ module) */
7281 tx_en_mode = REG_RD(bp, params->shmem_base +
7282 offsetof(struct shmem_region,
7283 dev_info.port_hw_config[port].sfp_ctrl)) &
7284 PORT_HW_CFG_TX_LASER_MASK;
7285 DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
7286 "mode = %x\n", tx_en, port, tx_en_mode);
7287 switch (tx_en_mode) {
7288 case PORT_HW_CFG_TX_LASER_MDIO:
7289
7290 bnx2x_cl45_read(bp, phy,
7291 MDIO_PMA_DEVAD,
7292 MDIO_PMA_REG_PHY_IDENTIFIER,
7293 &val);
7294
7295 if (tx_en)
7296 val &= ~(1<<15);
7297 else
7298 val |= (1<<15);
7299
7300 bnx2x_cl45_write(bp, phy,
7301 MDIO_PMA_DEVAD,
7302 MDIO_PMA_REG_PHY_IDENTIFIER,
7303 val);
7304 break;
7305 case PORT_HW_CFG_TX_LASER_GPIO0:
7306 case PORT_HW_CFG_TX_LASER_GPIO1:
7307 case PORT_HW_CFG_TX_LASER_GPIO2:
7308 case PORT_HW_CFG_TX_LASER_GPIO3:
7309 {
7310 u16 gpio_pin;
7311 u8 gpio_port, gpio_mode;
7312 if (tx_en)
7313 gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
7314 else
7315 gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
7316
7317 gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
7318 gpio_port = bnx2x_get_gpio_port(params);
7319 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
7320 break;
7321 }
7322 default:
7323 DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
7324 break;
7325 }
7326}
7327
7328static void bnx2x_sfp_set_transmitter(struct link_params *params,
7329 struct bnx2x_phy *phy,
7330 u8 tx_en)
7331{
7332 struct bnx2x *bp = params->bp;
7333 DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", tx_en);
7334 if (CHIP_IS_E3(bp))
7335 bnx2x_sfp_e3_set_transmitter(params, phy, tx_en);
7336 else
7337 bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en);
7338}
7339
7340static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7341 struct link_params *params,
7342 u16 addr, u8 byte_cnt, u8 *o_buf)
7343{
7344 struct bnx2x *bp = params->bp;
7345 u16 val = 0;
7346 u16 i;
7347 if (byte_cnt > 16) {
7348		DP(NETIF_MSG_LINK, "Reading from eeprom is"
7349			   " limited to 16 bytes\n");
7350 return -EINVAL;
7351 }
7352 /* Set the read command byte count */
7353 bnx2x_cl45_write(bp, phy,
7354 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
7355 (byte_cnt | 0xa000));
7356
7357 /* Set the read command address */
7358 bnx2x_cl45_write(bp, phy,
7359 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
7360 addr);
7361
7362 /* Activate read command */
7363 bnx2x_cl45_write(bp, phy,
7364 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
7365 0x2c0f);
7366
7367 /* Wait up to 500us for command complete status */
7368 for (i = 0; i < 100; i++) {
7369 bnx2x_cl45_read(bp, phy,
7370 MDIO_PMA_DEVAD,
7371 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
7372 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
7373 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
7374 break;
7375 udelay(5);
7376 }
7377
7378 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
7379 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
7380 DP(NETIF_MSG_LINK,
7381 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
7382 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
7383 return -EINVAL;
7384 }
7385
7386 /* Read the buffer */
7387 for (i = 0; i < byte_cnt; i++) {
7388 bnx2x_cl45_read(bp, phy,
7389 MDIO_PMA_DEVAD,
7390 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
7391 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
7392 }
7393
7394 for (i = 0; i < 100; i++) {
7395 bnx2x_cl45_read(bp, phy,
7396 MDIO_PMA_DEVAD,
7397 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
7398 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
7399 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
7400 return 0;
7401 msleep(1);
7402 }
7403 return -EINVAL;
7404}
7405
7406static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7407 struct link_params *params,
7408 u16 addr, u8 byte_cnt,
7409 u8 *o_buf)
7410{
7411 int rc = 0;
7412 u8 i, j = 0, cnt = 0;
7413 u32 data_array[4];
7414 u16 addr32;
7415 struct bnx2x *bp = params->bp;
7416 /*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:"
7417 " addr %d, cnt %d\n",
7418 addr, byte_cnt);*/
7419 if (byte_cnt > 16) {
7420 DP(NETIF_MSG_LINK, "Reading from eeprom is"
7421			   " limited to 16 bytes\n");
7422 return -EINVAL;
7423 }
7424
7425 /* 4 byte aligned address */
7426 addr32 = addr & (~0x3);
7427 do {
7428 rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
7429 data_array);
7430 } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
7431
7432 if (rc == 0) {
7433 for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) {
7434 o_buf[j] = *((u8 *)data_array + i);
7435 j++;
7436 }
7437 }
7438
7439 return rc;
7440}
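/*
 * Worked example for the aligned read above: a request for addr = 0x5 rounds
 * down to addr32 = 0x4; bnx2x_bsc_read() fills data_array[] starting from
 * the aligned address, and the copy loop skips (addr - addr32) = 1 leading
 * byte so that o_buf[] receives exactly the byte_cnt bytes the caller asked
 * for.
 */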
7441
7442static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7443 struct link_params *params,
7444 u16 addr, u8 byte_cnt, u8 *o_buf)
7445{
7446 struct bnx2x *bp = params->bp;
7447 u16 val, i;
7448
7449 if (byte_cnt > 16) {
7450		DP(NETIF_MSG_LINK, "Reading from eeprom is"
7451			   " limited to 16 bytes\n");
7452 return -EINVAL;
7453 }
7454
7455 /* Need to read from 1.8000 to clear it */
7456 bnx2x_cl45_read(bp, phy,
7457 MDIO_PMA_DEVAD,
7458 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
7459 &val);
7460
7461 /* Set the read command byte count */
7462 bnx2x_cl45_write(bp, phy,
7463 MDIO_PMA_DEVAD,
7464 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
7465 ((byte_cnt < 2) ? 2 : byte_cnt));
7466
7467 /* Set the read command address */
7468 bnx2x_cl45_write(bp, phy,
7469 MDIO_PMA_DEVAD,
7470 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
7471 addr);
7472 /* Set the destination address */
7473 bnx2x_cl45_write(bp, phy,
7474 MDIO_PMA_DEVAD,
7475 0x8004,
7476 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
7477
7478 /* Activate read command */
7479 bnx2x_cl45_write(bp, phy,
7480 MDIO_PMA_DEVAD,
7481 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
7482 0x8002);
7483 /*
7484 * Wait appropriate time for two-wire command to finish before
7485 * polling the status register
7486 */
7487 msleep(1);
7488
7489 /* Wait up to 500us for command complete status */
7490 for (i = 0; i < 100; i++) {
7491 bnx2x_cl45_read(bp, phy,
7492 MDIO_PMA_DEVAD,
7493 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
7494 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
7495 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
7496 break;
7497 udelay(5);
7498 }
7499
7500 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
7501 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
7502 DP(NETIF_MSG_LINK,
7503 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
7504 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
7505 return -EFAULT;
7506 }
7507
7508 /* Read the buffer */
7509 for (i = 0; i < byte_cnt; i++) {
7510 bnx2x_cl45_read(bp, phy,
7511 MDIO_PMA_DEVAD,
7512 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
7513 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
7514 }
7515
7516 for (i = 0; i < 100; i++) {
7517 bnx2x_cl45_read(bp, phy,
7518 MDIO_PMA_DEVAD,
7519 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
7520 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
7521 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
7522 return 0;
7523 msleep(1);
7524 }
7525
7526 return -EINVAL;
7527}
7528
7529int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7530 struct link_params *params, u16 addr,
7531 u8 byte_cnt, u8 *o_buf)
7532{
7533 int rc = -EINVAL;
7534 switch (phy->type) {
7535 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7536 rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
7537 byte_cnt, o_buf);
7538 break;
7539 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7540 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
7541 rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
7542 byte_cnt, o_buf);
7543 break;
7544 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7545 rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
7546 byte_cnt, o_buf);
7547 break;
7548 }
7549 return rc;
7550}
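/*
 * Illustrative sketch (not part of the driver): reading the module's vendor
 * name via the dispatcher above. SFP_EEPROM_VENDOR_NAME_ADDR and
 * SFP_EEPROM_VENDOR_NAME_SIZE are the constants already used elsewhere in
 * this file; the block is guarded out so the file compiles unchanged.
 */
#if 0
static void example_dump_sfp_vendor(struct bnx2x_phy *phy,
				    struct link_params *params)
{
	struct bnx2x *bp = params->bp;
	u8 name[SFP_EEPROM_VENDOR_NAME_SIZE + 1];

	if (bnx2x_read_sfp_module_eeprom(phy, params,
					 SFP_EEPROM_VENDOR_NAME_ADDR,
					 SFP_EEPROM_VENDOR_NAME_SIZE,
					 name) == 0) {
		name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
		DP(NETIF_MSG_LINK, "SFP+ vendor: %s\n", name);
	}
}
#endif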
7551
7552static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
7553 struct link_params *params,
7554 u16 *edc_mode)
7555{
7556 struct bnx2x *bp = params->bp;
7557 u32 sync_offset = 0, phy_idx, media_types;
7558 u8 val, check_limiting_mode = 0;
7559 *edc_mode = EDC_MODE_LIMITING;
7560
7561 phy->media_type = ETH_PHY_UNSPECIFIED;
7562 /* First check for copper cable */
7563 if (bnx2x_read_sfp_module_eeprom(phy,
7564 params,
7565 SFP_EEPROM_CON_TYPE_ADDR,
7566 1,
7567 &val) != 0) {
7568 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
7569 return -EINVAL;
7570 }
7571
7572 switch (val) {
7573 case SFP_EEPROM_CON_TYPE_VAL_COPPER:
7574 {
7575 u8 copper_module_type;
7576 phy->media_type = ETH_PHY_DA_TWINAX;
7577		/*
7578		 * Check if it is an active cable (includes SFP+ module)
7579		 * or a passive cable
7580		 */
7581 if (bnx2x_read_sfp_module_eeprom(phy,
7582 params,
7583 SFP_EEPROM_FC_TX_TECH_ADDR,
7584 1,
7585 &copper_module_type) != 0) {
7586 DP(NETIF_MSG_LINK,
7587 "Failed to read copper-cable-type"
7588 " from SFP+ EEPROM\n");
7589 return -EINVAL;
7590 }
7591
7592 if (copper_module_type &
7593 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
7594 DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
7595 check_limiting_mode = 1;
7596 } else if (copper_module_type &
7597 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
7598 DP(NETIF_MSG_LINK, "Passive Copper"
7599 " cable detected\n");
7600 *edc_mode =
7601 EDC_MODE_PASSIVE_DAC;
7602 } else {
7603 DP(NETIF_MSG_LINK, "Unknown copper-cable-"
7604 "type 0x%x !!!\n", copper_module_type);
7605 return -EINVAL;
7606 }
7607 break;
7608 }
7609 case SFP_EEPROM_CON_TYPE_VAL_LC:
7610 phy->media_type = ETH_PHY_SFP_FIBER;
7611 DP(NETIF_MSG_LINK, "Optic module detected\n");
7612 check_limiting_mode = 1;
7613 break;
7614 default:
7615 DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
7616 val);
7617 return -EINVAL;
7618 }
7619 sync_offset = params->shmem_base +
7620 offsetof(struct shmem_region,
7621 dev_info.port_hw_config[params->port].media_type);
7622 media_types = REG_RD(bp, sync_offset);
7623 /* Update media type for non-PMF sync */
7624 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
7625 if (&(params->phy[phy_idx]) == phy) {
7626 media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
7627 (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
7628 media_types |= ((phy->media_type &
7629 PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
7630 (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
7631 break;
7632 }
7633 }
7634 REG_WR(bp, sync_offset, media_types);
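	/*
	 * Worked example for the read-modify-write above: each PHY owns a
	 * PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK-wide field in media_types, offset
	 * by PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT per index. For phy_idx = 1
	 * the loop clears only that PHY's field and ORs the new
	 * phy->media_type into the same position, so the other PHYs' fields
	 * survive the single REG_WR() of the word.
	 */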
7635 if (check_limiting_mode) {
7636 u8 options[SFP_EEPROM_OPTIONS_SIZE];
7637 if (bnx2x_read_sfp_module_eeprom(phy,
7638 params,
7639 SFP_EEPROM_OPTIONS_ADDR,
7640 SFP_EEPROM_OPTIONS_SIZE,
7641 options) != 0) {
7642 DP(NETIF_MSG_LINK, "Failed to read Option"
7643 " field from module EEPROM\n");
7644 return -EINVAL;
7645 }
7646 if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK))
7647 *edc_mode = EDC_MODE_LINEAR;
7648 else
7649 *edc_mode = EDC_MODE_LIMITING;
7650 }
7651 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
7652 return 0;
7653}
7654/*
7655 * This function reads the relevant field from the module (SFP+) and
7656 * verifies it is compliant with this board
7657 */
7658static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
7659 struct link_params *params)
7660{
7661 struct bnx2x *bp = params->bp;
7662 u32 val, cmd;
7663 u32 fw_resp, fw_cmd_param;
7664 char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
7665 char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
7666 phy->flags &= ~FLAGS_SFP_NOT_APPROVED;
7667 val = REG_RD(bp, params->shmem_base +
7668 offsetof(struct shmem_region, dev_info.
7669 port_feature_config[params->port].config));
7670 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
7671 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) {
7672 DP(NETIF_MSG_LINK, "NOT enforcing module verification\n");
7673 return 0;
7674 }
7675
7676 if (params->feature_config_flags &
7677 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
7678 /* Use specific phy request */
7679 cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
7680 } else if (params->feature_config_flags &
7681 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
7682 /* Use first phy request only in case of non-dual media*/
7683 if (DUAL_MEDIA(params)) {
7684 DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
7685 "verification\n");
7686 return -EINVAL;
7687 }
7688 cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
7689 } else {
7690 /* No support in OPT MDL detection */
7691 DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
7692 "verification\n");
7693 return -EINVAL;
7694 }
7695
7696 fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
7697 fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
7698 if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
7699 DP(NETIF_MSG_LINK, "Approved module\n");
7700 return 0;
7701 }
7702
7703 /* format the warning message */
7704 if (bnx2x_read_sfp_module_eeprom(phy,
7705 params,
7706 SFP_EEPROM_VENDOR_NAME_ADDR,
7707 SFP_EEPROM_VENDOR_NAME_SIZE,
7708 (u8 *)vendor_name))
7709 vendor_name[0] = '\0';
7710 else
7711 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
7712 if (bnx2x_read_sfp_module_eeprom(phy,
7713 params,
7714 SFP_EEPROM_PART_NO_ADDR,
7715 SFP_EEPROM_PART_NO_SIZE,
7716 (u8 *)vendor_pn))
7717 vendor_pn[0] = '\0';
7718 else
7719 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
7720
7721 netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
7722 " Port %d from %s part number %s\n",
7723 params->port, vendor_name, vendor_pn);
7724 phy->flags |= FLAGS_SFP_NOT_APPROVED;
7725 return -EINVAL;
7726}
7727
7728static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
7729 struct link_params *params)
7730
7731{
7732 u8 val;
7733 struct bnx2x *bp = params->bp;
7734 u16 timeout;
7735	/*
7736	 * Initialization time after hot-plug may take up to 300ms for
7737	 * some phy types (e.g. JDSU)
7738	 */
7739
7740 for (timeout = 0; timeout < 60; timeout++) {
7741 if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
7742 == 0) {
7743 DP(NETIF_MSG_LINK, "SFP+ module initialization "
7744 "took %d ms\n", timeout * 5);
7745 return 0;
7746 }
7747 msleep(5);
7748 }
7749 return -EINVAL;
7750}
7751
7752static void bnx2x_8727_power_module(struct bnx2x *bp,
7753 struct bnx2x_phy *phy,
7754 u8 is_power_up) {
7755	/* Make sure GPIOs are not used for LED mode */
7756 u16 val;
7757	/*
7758	 * In the GPIO register, bit 4 is used to determine if the GPIOs are
7759	 * operating as INPUT or as OUTPUT. A value of 1 means input, and 0
7760	 * means output.
7761	 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
7762	 * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1
7763	 * where the 1st bit is the over-current (input only), and the 2nd bit
7764	 * is for power (output only).
7765	 *
7766	 * In case the NOC feature is disabled and power is up, set GPIO control
7767	 * as input to enable listening for the over-current indication.
7768	 */
7769 if (phy->flags & FLAGS_NOC)
7770 return;
7771 if (is_power_up)
7772 val = (1<<4);
7773 else
7774		/*
7775		 * Set GPIO control to OUTPUT, and set the power bit
7776		 * according to is_power_up
7777		 */
7778 val = (1<<1);
7779
7780 bnx2x_cl45_write(bp, phy,
7781 MDIO_PMA_DEVAD,
7782 MDIO_PMA_REG_8727_GPIO_CTRL,
7783 val);
7784}
7785
7786static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
7787 struct bnx2x_phy *phy,
7788 u16 edc_mode)
7789{
7790 u16 cur_limiting_mode;
7791
7792 bnx2x_cl45_read(bp, phy,
7793 MDIO_PMA_DEVAD,
7794 MDIO_PMA_REG_ROM_VER2,
7795 &cur_limiting_mode);
7796 DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
7797 cur_limiting_mode);
7798
7799 if (edc_mode == EDC_MODE_LIMITING) {
7800 DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
7801 bnx2x_cl45_write(bp, phy,
7802 MDIO_PMA_DEVAD,
7803 MDIO_PMA_REG_ROM_VER2,
7804 EDC_MODE_LIMITING);
7805	} else { /* LRM mode (default) */
7806
7807 DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
7808
7809		/*
7810		 * Changing to LRM mode takes quite a few seconds, so do it only
7811		 * if the current mode is limiting (default is LRM)
7812		 */
7813 if (cur_limiting_mode != EDC_MODE_LIMITING)
7814 return 0;
7815
7816 bnx2x_cl45_write(bp, phy,
7817 MDIO_PMA_DEVAD,
7818 MDIO_PMA_REG_LRM_MODE,
7819 0);
7820 bnx2x_cl45_write(bp, phy,
7821 MDIO_PMA_DEVAD,
7822 MDIO_PMA_REG_ROM_VER2,
7823 0x128);
7824 bnx2x_cl45_write(bp, phy,
7825 MDIO_PMA_DEVAD,
7826 MDIO_PMA_REG_MISC_CTRL0,
7827 0x4008);
7828 bnx2x_cl45_write(bp, phy,
7829 MDIO_PMA_DEVAD,
7830 MDIO_PMA_REG_LRM_MODE,
7831 0xaaaa);
7832 }
7833 return 0;
7834}
7835
7836static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
7837 struct bnx2x_phy *phy,
7838 u16 edc_mode)
7839{
7840 u16 phy_identifier;
7841 u16 rom_ver2_val;
7842 bnx2x_cl45_read(bp, phy,
7843 MDIO_PMA_DEVAD,
7844 MDIO_PMA_REG_PHY_IDENTIFIER,
7845 &phy_identifier);
7846
7847 bnx2x_cl45_write(bp, phy,
7848 MDIO_PMA_DEVAD,
7849 MDIO_PMA_REG_PHY_IDENTIFIER,
7850 (phy_identifier & ~(1<<9)));
7851
7852 bnx2x_cl45_read(bp, phy,
7853 MDIO_PMA_DEVAD,
7854 MDIO_PMA_REG_ROM_VER2,
7855 &rom_ver2_val);
7856 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
7857 bnx2x_cl45_write(bp, phy,
7858 MDIO_PMA_DEVAD,
7859 MDIO_PMA_REG_ROM_VER2,
7860 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
7861
7862 bnx2x_cl45_write(bp, phy,
7863 MDIO_PMA_DEVAD,
7864 MDIO_PMA_REG_PHY_IDENTIFIER,
7865 (phy_identifier | (1<<9)));
7866
7867 return 0;
7868}
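/*
 * Worked example for the byte merge above: with rom_ver2_val = 0x12aa and
 * edc_mode = 0x0055, (0x12aa & 0xff00) | (0x0055 & 0x00ff) = 0x1255, i.e.
 * the register's MSB is preserved while its LSB becomes the requested EDC
 * mode. Bit 9 of the PHY identifier register is toggled around the write,
 * apparently to latch the new mode.
 */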
7869
7870static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
7871 struct link_params *params,
7872 u32 action)
7873{
7874 struct bnx2x *bp = params->bp;
7875
7876 switch (action) {
7877 case DISABLE_TX:
7878 bnx2x_sfp_set_transmitter(params, phy, 0);
7879 break;
7880 case ENABLE_TX:
7881 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
7882 bnx2x_sfp_set_transmitter(params, phy, 1);
7883 break;
7884 default:
7885 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
7886 action);
7887 return;
7888 }
7889}
7890
7891static void bnx2x_set_e1e2_module_fault_led(struct link_params *params,
7892 u8 gpio_mode)
7893{
7894 struct bnx2x *bp = params->bp;
7895
7896 u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
7897 offsetof(struct shmem_region,
7898 dev_info.port_hw_config[params->port].sfp_ctrl)) &
7899 PORT_HW_CFG_FAULT_MODULE_LED_MASK;
7900 switch (fault_led_gpio) {
7901 case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
7902 return;
7903 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
7904 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
7905 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
7906 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
7907 {
7908 u8 gpio_port = bnx2x_get_gpio_port(params);
7909 u16 gpio_pin = fault_led_gpio -
7910 PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
7911 DP(NETIF_MSG_LINK, "Set fault module-detected led "
7912 "pin %x port %x mode %x\n",
7913 gpio_pin, gpio_port, gpio_mode);
7914 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
7915 }
7916 break;
7917 default:
7918 DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
7919 fault_led_gpio);
7920 }
7921}
7922
7923static void bnx2x_set_e3_module_fault_led(struct link_params *params,
7924 u8 gpio_mode)
7925{
7926 u32 pin_cfg;
7927 u8 port = params->port;
7928 struct bnx2x *bp = params->bp;
7929 pin_cfg = (REG_RD(bp, params->shmem_base +
7930 offsetof(struct shmem_region,
7931 dev_info.port_hw_config[port].e3_sfp_ctrl)) &
7932 PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >>
7933 PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT;
7934 DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n",
7935 gpio_mode, pin_cfg);
7936 bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode);
7937}
7938
7939static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
7940 u8 gpio_mode)
7941{
7942 struct bnx2x *bp = params->bp;
7943 DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
7944 if (CHIP_IS_E3(bp)) {
7945		/*
7946		 * Low ==> the SFP+ module is supported;
7947		 * High ==> the SFP+ module is not on the approved vendor list
7948		 */
7949 bnx2x_set_e3_module_fault_led(params, gpio_mode);
7950 } else
7951 bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
7952}
7953
7954static void bnx2x_warpcore_power_module(struct link_params *params,
7955 struct bnx2x_phy *phy,
7956 u8 power)
7957{
7958 u32 pin_cfg;
7959 struct bnx2x *bp = params->bp;
7960
7961 pin_cfg = (REG_RD(bp, params->shmem_base +
7962 offsetof(struct shmem_region,
7963 dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
7964 PORT_HW_CFG_E3_PWR_DIS_MASK) >>
7965 PORT_HW_CFG_E3_PWR_DIS_SHIFT;
7966
7967 if (pin_cfg == PIN_CFG_NA)
7968 return;
7969 DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
7970 power, pin_cfg);
7971 /*
7972 * Low ==> corresponding SFP+ module is powered
7973 * high ==> the SFP+ module is powered down
7974 */
7975 bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
7976}
7977
7978static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
7979 struct link_params *params)
7980{
7981 bnx2x_warpcore_power_module(params, phy, 0);
7982}
7983
7984static void bnx2x_power_sfp_module(struct link_params *params,
7985 struct bnx2x_phy *phy,
7986 u8 power)
7987{
7988 struct bnx2x *bp = params->bp;
7989 DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power);
7990
7991 switch (phy->type) {
7992 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7993 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
7994 bnx2x_8727_power_module(params->bp, phy, power);
7995 break;
7996 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7997 bnx2x_warpcore_power_module(params, phy, power);
7998 break;
7999 default:
8000 break;
8001 }
8002}
8003static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
8004 struct bnx2x_phy *phy,
8005 u16 edc_mode)
8006{
8007 u16 val = 0;
8008 u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
8009 struct bnx2x *bp = params->bp;
8010
8011 u8 lane = bnx2x_get_warpcore_lane(phy, params);
8012 /* This is a global register which controls all lanes */
8013 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
8014 MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
8015 val &= ~(0xf << (lane << 2));
8016
8017 switch (edc_mode) {
8018 case EDC_MODE_LINEAR:
8019 case EDC_MODE_LIMITING:
8020 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
8021 break;
8022 case EDC_MODE_PASSIVE_DAC:
8023 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
8024 break;
8025 default:
8026 break;
8027 }
8028
8029 val |= (mode << (lane << 2));
8030 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
8031 MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val);
8032	/* A mandatory read-back */
8033 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
8034 MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
8035
8036 /* Restart microcode to re-read the new mode */
8037 bnx2x_warpcore_reset_lane(bp, phy, 1);
8038 bnx2x_warpcore_reset_lane(bp, phy, 0);
8039
8040}
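/*
 * Worked example for the per-lane update above: the firmware-mode register
 * packs one 4-bit mode per lane, so for lane = 2 the code clears bits 11:8
 * (0xf << (2 << 2)) and ORs the new mode into that same nibble, leaving the
 * other lanes' modes untouched.
 */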
8041
8042static void bnx2x_set_limiting_mode(struct link_params *params,
8043 struct bnx2x_phy *phy,
8044 u16 edc_mode)
8045{
8046 switch (phy->type) {
8047 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8048 bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode);
8049 break;
8050 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8051 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
8052 bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode);
8053 break;
8054 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8055 bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode);
8056 break;
8057 }
8058}
8059
8060int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
8061 struct link_params *params)
8062{
8063 struct bnx2x *bp = params->bp;
8064 u16 edc_mode;
8065 int rc = 0;
8066
8067 u32 val = REG_RD(bp, params->shmem_base +
8068 offsetof(struct shmem_region, dev_info.
8069 port_feature_config[params->port].config));
8070
8071 DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
8072 params->port);
8073 /* Power up module */
8074 bnx2x_power_sfp_module(params, phy, 1);
8075 if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
8076 DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
8077 return -EINVAL;
8078 } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
8079 /* check SFP+ module compatibility */
8080 DP(NETIF_MSG_LINK, "Module verification failed!!\n");
8081 rc = -EINVAL;
8082 /* Turn on fault module-detected led */
8083 bnx2x_set_sfp_module_fault_led(params,
8084 MISC_REGISTERS_GPIO_HIGH);
8085
8086 /* Check if need to power down the SFP+ module */
8087 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
8088 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) {
8089 DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
8090 bnx2x_power_sfp_module(params, phy, 0);
8091 return rc;
8092 }
8093 } else {
8094 /* Turn off fault module-detected led */
8095 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
8096 }
8097
8098 /*
8099 * Check and set limiting mode / LRM mode on 8726. On 8727 it
8100 * is done automatically
8101 */
8102 bnx2x_set_limiting_mode(params, phy, edc_mode);
8103
8104 /*
8105 * Enable transmit for this module if the module is approved, or
8106 * if unapproved modules should also enable the Tx laser
8107 */
8108 if (rc == 0 ||
8109 (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
8110 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
8111 bnx2x_sfp_set_transmitter(params, phy, 1);
8112 else
8113 bnx2x_sfp_set_transmitter(params, phy, 0);
8114
8115 return rc;
8116}
8117
8118void bnx2x_handle_module_detect_int(struct link_params *params)
8119{
8120 struct bnx2x *bp = params->bp;
8121 struct bnx2x_phy *phy;
8122 u32 gpio_val;
8123 u8 gpio_num, gpio_port;
8124 if (CHIP_IS_E3(bp))
8125 phy = &params->phy[INT_PHY];
8126 else
8127 phy = &params->phy[EXT_PHY1];
8128
8129 if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
8130 params->port, &gpio_num, &gpio_port) ==
8131 -EINVAL) {
8132 DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n");
8133 return;
8134 }
8135
8136 /* Set valid module led off */
8137 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
8138
8139 /* Get current gpio val reflecting module plugged in / out*/
8140 gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
8141
8142 /* Call the handling function in case module is detected */
8143 if (gpio_val == 0) {
8144 bnx2x_power_sfp_module(params, phy, 1);
8145 bnx2x_set_gpio_int(bp, gpio_num,
8146 MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
8147 gpio_port);
8148 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
8149 bnx2x_sfp_module_detection(phy, params);
8150 else
8151 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
8152 } else {
8153 u32 val = REG_RD(bp, params->shmem_base +
8154 offsetof(struct shmem_region, dev_info.
8155 port_feature_config[params->port].
8156 config));
8157 bnx2x_set_gpio_int(bp, gpio_num,
8158 MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
8159 gpio_port);
8160 /*
8161 * Module was plugged out.
8162 * Disable transmit for this module
8163 */
8164 phy->media_type = ETH_PHY_NOT_PRESENT;
8165 if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
8166 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
8167 CHIP_IS_E3(bp))
8168 bnx2x_sfp_set_transmitter(params, phy, 0);
8169 }
8170}
8171
8172/******************************************************************/
8173/* Used by 8706 and 8727 */
8174/******************************************************************/
8175static void bnx2x_sfp_mask_fault(struct bnx2x *bp,
8176 struct bnx2x_phy *phy,
8177 u16 alarm_status_offset,
8178 u16 alarm_ctrl_offset)
8179{
8180 u16 alarm_status, val;
8181 bnx2x_cl45_read(bp, phy,
8182 MDIO_PMA_DEVAD, alarm_status_offset,
8183 &alarm_status);
8184 bnx2x_cl45_read(bp, phy,
8185 MDIO_PMA_DEVAD, alarm_status_offset,
8186 &alarm_status);
8187 /* Mask or enable the fault event. */
8188 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val);
8189 if (alarm_status & (1<<0))
8190 val &= ~(1<<0);
8191 else
8192 val |= (1<<0);
8193 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val);
8194}
8195/******************************************************************/
8196/* common BCM8706/BCM8726 PHY SECTION */
8197/******************************************************************/
8198static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
8199 struct link_params *params,
8200 struct link_vars *vars)
8201{
8202 u8 link_up = 0;
8203 u16 val1, val2, rx_sd, pcs_status;
8204 struct bnx2x *bp = params->bp;
8205 DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
8206 /* Clear RX Alarm*/
8207 bnx2x_cl45_read(bp, phy,
8208 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
8209
8210 bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
8211 MDIO_PMA_LASI_TXCTRL);
8212
8213 /* clear LASI indication*/
8214 bnx2x_cl45_read(bp, phy,
8215 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
8216 bnx2x_cl45_read(bp, phy,
8217 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
8218 DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
8219
8220 bnx2x_cl45_read(bp, phy,
8221 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
8222 bnx2x_cl45_read(bp, phy,
8223 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
8224 bnx2x_cl45_read(bp, phy,
8225 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
8226 bnx2x_cl45_read(bp, phy,
8227 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
8228
8229 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
8230 " link_status 0x%x\n", rx_sd, pcs_status, val2);
8231 /*
8232 * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
8233 * are set, or if the autoneg bit 1 is set
8234 */
8235 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
8236 if (link_up) {
8237 if (val2 & (1<<1))
8238 vars->line_speed = SPEED_1000;
8239 else
8240 vars->line_speed = SPEED_10000;
8241 bnx2x_ext_phy_resolve_fc(phy, params, vars);
8242 vars->duplex = DUPLEX_FULL;
8243 }
8244
8245 /* Capture 10G link fault. Read twice to clear stale value. */
8246 if (vars->line_speed == SPEED_10000) {
8247 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
8248 MDIO_PMA_LASI_TXSTAT, &val1);
8249 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
8250 MDIO_PMA_LASI_TXSTAT, &val1);
8251 if (val1 & (1<<0))
8252 vars->fault_detected = 1;
8253 }
8254
8255 return link_up;
8256}
8257
8258/******************************************************************/
8259/* BCM8706 PHY SECTION */
8260/******************************************************************/
8261static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
8262 struct link_params *params,
8263 struct link_vars *vars)
8264{
8265 u32 tx_en_mode;
8266 u16 cnt, val, tmp1;
8267 struct bnx2x *bp = params->bp;
8268
8269 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
8270 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
8271 /* HW reset */
8272 bnx2x_ext_phy_hw_reset(bp, params->port);
8273 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
8274 bnx2x_wait_reset_complete(bp, phy, params);
8275
8276 /* Wait until fw is loaded */
8277 for (cnt = 0; cnt < 100; cnt++) {
8278 bnx2x_cl45_read(bp, phy,
8279 MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
8280 if (val)
8281 break;
8282 msleep(10);
8283 }
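	/*
	 * A non-zero ROM version indicates the microcode has finished
	 * loading; each poll is 10ms, so the count printed below is in
	 * 10ms units and the worst case is ~1 second.
	 */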
8284 DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
8285 if ((params->feature_config_flags &
8286 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
8287 u8 i;
8288 u16 reg;
8289 for (i = 0; i < 4; i++) {
8290 reg = MDIO_XS_8706_REG_BANK_RX0 +
8291 i*(MDIO_XS_8706_REG_BANK_RX1 -
8292 MDIO_XS_8706_REG_BANK_RX0);
8293 bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val);
8294 /* Clear first 3 bits of the control */
8295 val &= ~0x7;
8296 /* Set control bits according to configuration */
8297 val |= (phy->rx_preemphasis[i] & 0x7);
8298 DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706"
8299 " reg 0x%x <-- val 0x%x\n", reg, val);
8300 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val);
8301 }
8302 }
8303 /* Force speed */
8304 if (phy->req_line_speed == SPEED_10000) {
8305 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
8306
8307 bnx2x_cl45_write(bp, phy,
8308 MDIO_PMA_DEVAD,
8309 MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
8310 bnx2x_cl45_write(bp, phy,
8311 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
8312 0);
8313 /* Arm LASI for link and Tx fault. */
8314 bnx2x_cl45_write(bp, phy,
8315 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3);
8316 } else {
8317 /* Force 1Gbps using autoneg with 1G advertisement */
8318
8319 /* Allow CL37 through CL73 */
8320 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
8321 bnx2x_cl45_write(bp, phy,
8322 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
8323
8324 /* Enable Full-Duplex advertisement on CL37 */
8325 bnx2x_cl45_write(bp, phy,
8326 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
8327 /* Enable CL37 AN */
8328 bnx2x_cl45_write(bp, phy,
8329 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
8330 /* 1G support */
8331 bnx2x_cl45_write(bp, phy,
8332 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5));
8333
8334 /* Enable clause 73 AN */
8335 bnx2x_cl45_write(bp, phy,
8336 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
8337 bnx2x_cl45_write(bp, phy,
8338 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
8339 0x0400);
8340 bnx2x_cl45_write(bp, phy,
8341 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
8342 0x0004);
8343 }
8344 bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
8345
8346 /*
8347	 * If the TX laser is controlled by GPIO_0, do not let the PHY go
8348	 * into low-power mode while the TX laser is disabled
8349 */
8350
8351 tx_en_mode = REG_RD(bp, params->shmem_base +
8352 offsetof(struct shmem_region,
8353 dev_info.port_hw_config[params->port].sfp_ctrl))
8354 & PORT_HW_CFG_TX_LASER_MASK;
8355
8356 if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
8357 DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
8358 bnx2x_cl45_read(bp, phy,
8359 MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
8360 tmp1 |= 0x1;
8361 bnx2x_cl45_write(bp, phy,
8362 MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
8363 }
8364
8365 return 0;
8366}
8367
8368static int bnx2x_8706_read_status(struct bnx2x_phy *phy,
8369 struct link_params *params,
8370 struct link_vars *vars)
8371{
8372 return bnx2x_8706_8726_read_status(phy, params, vars);
8373}
8374
8375/******************************************************************/
8376/* BCM8726 PHY SECTION */
8377/******************************************************************/
8378static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy,
8379 struct link_params *params)
8380{
8381 struct bnx2x *bp = params->bp;
8382 DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
8383 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
8384}
8385
8386static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
8387 struct link_params *params)
8388{
8389 struct bnx2x *bp = params->bp;
8390 /* Need to wait 100ms after reset */
8391 msleep(100);
8392
8393 /* Micro controller re-boot */
8394 bnx2x_cl45_write(bp, phy,
8395 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B);
8396
8397 /* Set soft reset */
8398 bnx2x_cl45_write(bp, phy,
8399 MDIO_PMA_DEVAD,
8400 MDIO_PMA_REG_GEN_CTRL,
8401 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
8402
8403 bnx2x_cl45_write(bp, phy,
8404 MDIO_PMA_DEVAD,
8405 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
8406
8407 bnx2x_cl45_write(bp, phy,
8408 MDIO_PMA_DEVAD,
8409 MDIO_PMA_REG_GEN_CTRL,
8410 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
8411
8412	/* Wait 150ms for microcode load */
8413 msleep(150);
8414
8415 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
8416 bnx2x_cl45_write(bp, phy,
8417 MDIO_PMA_DEVAD,
8418 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
8419
8420 msleep(200);
8421 bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
8422}
8423
8424static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
8425 struct link_params *params,
8426 struct link_vars *vars)
8427{
8428 struct bnx2x *bp = params->bp;
8429 u16 val1;
8430 u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars);
8431 if (link_up) {
8432 bnx2x_cl45_read(bp, phy,
8433 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
8434 &val1);
8435 if (val1 & (1<<15)) {
8436 DP(NETIF_MSG_LINK, "Tx is disabled\n");
8437 link_up = 0;
8438 vars->line_speed = 0;
8439 }
8440 }
8441 return link_up;
8442}
8443
8444
8445static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
8446 struct link_params *params,
8447 struct link_vars *vars)
8448{
8449 struct bnx2x *bp = params->bp;
8450 DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
8451
8452 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
8453 bnx2x_wait_reset_complete(bp, phy, params);
8454
8455 bnx2x_8726_external_rom_boot(phy, params);
8456
8457 /*
8458	 * Module detection must be called on initialization, since the
8459	 * detection triggered by actual module insertion might occur before
8460	 * the driver is loaded; when the driver is loaded, it resets all
8461	 * registers, including the transmitter
8462 */
8463 bnx2x_sfp_module_detection(phy, params);
8464
8465 if (phy->req_line_speed == SPEED_1000) {
8466 DP(NETIF_MSG_LINK, "Setting 1G force\n");
8467 bnx2x_cl45_write(bp, phy,
8468 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
8469 bnx2x_cl45_write(bp, phy,
8470 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
8471 bnx2x_cl45_write(bp, phy,
8472 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5);
8473 bnx2x_cl45_write(bp, phy,
8474 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
8475 0x400);
8476 } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
8477 (phy->speed_cap_mask &
8478 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
8479 ((phy->speed_cap_mask &
8480 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
8481 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
8482 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
8483 /* Set Flow control */
8484 bnx2x_ext_phy_set_pause(params, phy, vars);
8485 bnx2x_cl45_write(bp, phy,
8486 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
8487 bnx2x_cl45_write(bp, phy,
8488 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
8489 bnx2x_cl45_write(bp, phy,
8490 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
8491 bnx2x_cl45_write(bp, phy,
8492 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
8493 bnx2x_cl45_write(bp, phy,
8494 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
8495 /*
8496 * Enable RX-ALARM control to receive interrupt for 1G speed
8497 * change
8498 */
8499 bnx2x_cl45_write(bp, phy,
8500 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4);
8501 bnx2x_cl45_write(bp, phy,
8502 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
8503 0x400);
8504
8505 } else { /* Default 10G. Set only LASI control */
8506 bnx2x_cl45_write(bp, phy,
8507 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1);
8508 }
8509
8510 /* Set TX PreEmphasis if needed */
8511 if ((params->feature_config_flags &
8512 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
8513		DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, "
8514		    "TX_CTRL2 0x%x\n",
8515 phy->tx_preemphasis[0],
8516 phy->tx_preemphasis[1]);
8517 bnx2x_cl45_write(bp, phy,
8518 MDIO_PMA_DEVAD,
8519 MDIO_PMA_REG_8726_TX_CTRL1,
8520 phy->tx_preemphasis[0]);
8521
8522 bnx2x_cl45_write(bp, phy,
8523 MDIO_PMA_DEVAD,
8524 MDIO_PMA_REG_8726_TX_CTRL2,
8525 phy->tx_preemphasis[1]);
8526 }
8527
8528 return 0;
8529
8530}
8531
8532static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
8533 struct link_params *params)
8534{
8535 struct bnx2x *bp = params->bp;
8536 DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port);
8537 /* Set serial boot control for external load */
8538 bnx2x_cl45_write(bp, phy,
8539 MDIO_PMA_DEVAD,
8540 MDIO_PMA_REG_GEN_CTRL, 0x0001);
8541}
8542
8543/******************************************************************/
8544/* BCM8727 PHY SECTION */
8545/******************************************************************/
8546
8547static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy,
8548 struct link_params *params, u8 mode)
8549{
8550 struct bnx2x *bp = params->bp;
8551 u16 led_mode_bitmask = 0;
8552 u16 gpio_pins_bitmask = 0;
8553 u16 val;
8554	/* Only the NOC flavor requires setting the LED specifically */
8555 if (!(phy->flags & FLAGS_NOC))
8556 return;
8557 switch (mode) {
8558 case LED_MODE_FRONT_PANEL_OFF:
8559 case LED_MODE_OFF:
8560 led_mode_bitmask = 0;
8561 gpio_pins_bitmask = 0x03;
8562 break;
8563 case LED_MODE_ON:
8564 led_mode_bitmask = 0;
8565 gpio_pins_bitmask = 0x02;
8566 break;
8567 case LED_MODE_OPER:
8568 led_mode_bitmask = 0x60;
8569 gpio_pins_bitmask = 0x11;
8570 break;
8571 }
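	/*
	 * Merge the LED mode into bits 4..6 of the PCS option control and
	 * the pin state into bits 0..4 of the GPIO control, preserving the
	 * remaining bits of each register.
	 */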
8572 bnx2x_cl45_read(bp, phy,
8573 MDIO_PMA_DEVAD,
8574 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
8575 &val);
8576 val &= 0xff8f;
8577 val |= led_mode_bitmask;
8578 bnx2x_cl45_write(bp, phy,
8579 MDIO_PMA_DEVAD,
8580 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
8581 val);
8582 bnx2x_cl45_read(bp, phy,
8583 MDIO_PMA_DEVAD,
8584 MDIO_PMA_REG_8727_GPIO_CTRL,
8585 &val);
8586 val &= 0xffe0;
8587 val |= gpio_pins_bitmask;
8588 bnx2x_cl45_write(bp, phy,
8589 MDIO_PMA_DEVAD,
8590 MDIO_PMA_REG_8727_GPIO_CTRL,
8591 val);
8592}
8593static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
8594				struct link_params *params)
{
8595 u32 swap_val, swap_override;
8596 u8 port;
8597 /*
8598 * The PHY reset is controlled by GPIO 1. Fake the port number
8599 * to cancel the swap done in set_gpio()
8600 */
8601 struct bnx2x *bp = params->bp;
8602 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8603 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
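	/*
	 * swap_val && swap_override is 1 only when both the port-swap
	 * strap and its override are set; XOR-ing with 1 yields the port
	 * number that cancels the swap bnx2x_set_gpio() will apply.
	 */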
8604 port = (swap_val && swap_override) ^ 1;
8605 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
8606 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
8607}
8608
8609static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
8610 struct link_params *params,
8611 struct link_vars *vars)
8612{
8613 u32 tx_en_mode;
8614 u16 tmp1, val, mod_abs, tmp2;
8615 u16 rx_alarm_ctrl_val;
8616 u16 lasi_ctrl_val;
8617 struct bnx2x *bp = params->bp;
8618 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
8619
8620 bnx2x_wait_reset_complete(bp, phy, params);
8621	rx_alarm_ctrl_val = (1<<2) | (1<<5);
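	/*
	 * Bit 2 enables the link-status alarm and bit 5 the MOD_ABS alarm;
	 * these match the bits checked in bnx2x_8727_read_status().
	 */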
8622 /* Should be 0x6 to enable XS on Tx side. */
8623 lasi_ctrl_val = 0x0006;
8624
8625 DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
8626 /* enable LASI */
8627 bnx2x_cl45_write(bp, phy,
8628 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
8629 rx_alarm_ctrl_val);
8630 bnx2x_cl45_write(bp, phy,
8631 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
8632 0);
8633 bnx2x_cl45_write(bp, phy,
8634 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
8635
8636 /*
8637	 * Initially configure MOD_ABS to interrupt when the module is
8638	 * present (bit 8)
8639 */
8640 bnx2x_cl45_read(bp, phy,
8641 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
8642 /*
8643 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
8644 * When the EDC is off it locks onto a reference clock and avoids
8645 * becoming 'lost'
8646 */
8647 mod_abs &= ~(1<<8);
8648 if (!(phy->flags & FLAGS_NOC))
8649 mod_abs &= ~(1<<9);
8650 bnx2x_cl45_write(bp, phy,
8651 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
8652
8653
8654 /* Enable/Disable PHY transmitter output */
8655 bnx2x_set_disable_pmd_transmit(params, phy, 0);
8656
8657 /* Make MOD_ABS give interrupt on change */
8658 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
8659 &val);
8660 val |= (1<<12);
8661 if (phy->flags & FLAGS_NOC)
8662 val |= (3<<5);
8663
8664 /*
8665 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
8666	 * status, which reflects SFP+ module over-current
8667 */
8668 if (!(phy->flags & FLAGS_NOC))
8669 val &= 0xff8f; /* Reset bits 4-6 */
8670 bnx2x_cl45_write(bp, phy,
8671 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
8672
8673 bnx2x_8727_power_module(bp, phy, 1);
8674
8675 bnx2x_cl45_read(bp, phy,
8676 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
8677
8678 bnx2x_cl45_read(bp, phy,
8679 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
8680
8681 /* Set option 1G speed */
8682 if (phy->req_line_speed == SPEED_1000) {
8683 DP(NETIF_MSG_LINK, "Setting 1G force\n");
8684 bnx2x_cl45_write(bp, phy,
8685 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
8686 bnx2x_cl45_write(bp, phy,
8687 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
8688 bnx2x_cl45_read(bp, phy,
8689 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
8690 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
8691 /*
8692 * Power down the XAUI until link is up in case of dual-media
8693 * and 1G
8694 */
8695 if (DUAL_MEDIA(params)) {
8696 bnx2x_cl45_read(bp, phy,
8697 MDIO_PMA_DEVAD,
8698 MDIO_PMA_REG_8727_PCS_GP, &val);
8699 val |= (3<<10);
8700 bnx2x_cl45_write(bp, phy,
8701 MDIO_PMA_DEVAD,
8702 MDIO_PMA_REG_8727_PCS_GP, val);
8703 }
8704 } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
8705 ((phy->speed_cap_mask &
8706 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
8707 ((phy->speed_cap_mask &
8708 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
8709 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
8710
8711 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
8712 bnx2x_cl45_write(bp, phy,
8713 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
8714 bnx2x_cl45_write(bp, phy,
8715 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
8716 } else {
8717 /*
8718		 * Since the 8727 has only a single reset pin, the 10G
8719		 * registers need to be set even though they are the default
8720 */
8721 bnx2x_cl45_write(bp, phy,
8722 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
8723 0x0020);
8724 bnx2x_cl45_write(bp, phy,
8725 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
8726 bnx2x_cl45_write(bp, phy,
8727 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
8728 bnx2x_cl45_write(bp, phy,
8729 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
8730 0x0008);
8731 }
8732
8733 /*
8734	 * Set the 2-wire transfer rate of the SFP+ module EEPROM
8735	 * to 100kHz, since some DACs (direct attach cables) do
8736	 * not work at 400kHz.
8737 */
8738 bnx2x_cl45_write(bp, phy,
8739 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
8740 0xa001);
8741
8742 /* Set TX PreEmphasis if needed */
8743 if ((params->feature_config_flags &
8744 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
8745 DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
8746 phy->tx_preemphasis[0],
8747 phy->tx_preemphasis[1]);
8748 bnx2x_cl45_write(bp, phy,
8749 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
8750 phy->tx_preemphasis[0]);
8751
8752 bnx2x_cl45_write(bp, phy,
8753 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2,
8754 phy->tx_preemphasis[1]);
8755 }
8756
8757 /*
8758 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
8759 * power mode, if TX Laser is disabled
8760 */
8761 tx_en_mode = REG_RD(bp, params->shmem_base +
8762 offsetof(struct shmem_region,
8763 dev_info.port_hw_config[params->port].sfp_ctrl))
8764 & PORT_HW_CFG_TX_LASER_MASK;
8765
8766 if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
8767
8768 DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
8769 bnx2x_cl45_read(bp, phy,
8770 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
8771 tmp2 |= 0x1000;
8772 tmp2 &= 0xFFEF;
8773 bnx2x_cl45_write(bp, phy,
8774 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
8775 }
8776
8777 return 0;
8778}
8779
8780static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
8781 struct link_params *params)
8782{
8783 struct bnx2x *bp = params->bp;
8784 u16 mod_abs, rx_alarm_status;
8785 u32 val = REG_RD(bp, params->shmem_base +
8786 offsetof(struct shmem_region, dev_info.
8787 port_feature_config[params->port].
8788 config));
8789 bnx2x_cl45_read(bp, phy,
8790 MDIO_PMA_DEVAD,
8791 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
8792 if (mod_abs & (1<<8)) {
8793
8794 /* Module is absent */
8795 DP(NETIF_MSG_LINK, "MOD_ABS indication "
8796		   "shows module is absent\n");
8797 phy->media_type = ETH_PHY_NOT_PRESENT;
8798 /*
8799 * 1. Set mod_abs to detect next module
8800 * presence event
8801 * 2. Set EDC off by setting OPTXLOS signal input to low
8802 * (bit 9).
8803 * When the EDC is off it locks onto a reference clock and
8804 * avoids becoming 'lost'.
8805 */
8806 mod_abs &= ~(1<<8);
8807 if (!(phy->flags & FLAGS_NOC))
8808 mod_abs &= ~(1<<9);
8809 bnx2x_cl45_write(bp, phy,
8810 MDIO_PMA_DEVAD,
8811 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
8812
8813 /*
8814 * Clear RX alarm since it stays up as long as
8815		 * the mod_abs hasn't changed
8816 */
8817 bnx2x_cl45_read(bp, phy,
8818 MDIO_PMA_DEVAD,
8819 MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
8820
8821 } else {
8822 /* Module is present */
8823 DP(NETIF_MSG_LINK, "MOD_ABS indication "
8824		   "shows module is present\n");
8825 /*
8826 * First disable transmitter, and if the module is ok, the
8827 * module_detection will enable it
8828		 * 1. Set mod_abs to detect next module absent event (bit 8)
8829 * 2. Restore the default polarity of the OPRXLOS signal and
8830 * this signal will then correctly indicate the presence or
8831 * absence of the Rx signal. (bit 9)
8832 */
8833 mod_abs |= (1<<8);
8834 if (!(phy->flags & FLAGS_NOC))
8835 mod_abs |= (1<<9);
8836 bnx2x_cl45_write(bp, phy,
8837 MDIO_PMA_DEVAD,
8838 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
8839
8840 /*
8841		 * Clear RX alarm since it stays up as long as the mod_abs
8842		 * hasn't changed. This needs to be done before calling
8843		 * module detection, otherwise it will clear the link update
8844		 * alarm
8845 */
8846 bnx2x_cl45_read(bp, phy,
8847 MDIO_PMA_DEVAD,
8848 MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
8849
8850
8851 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
8852 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
8853 bnx2x_sfp_set_transmitter(params, phy, 0);
8854
8855 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
8856 bnx2x_sfp_module_detection(phy, params);
8857 else
8858 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
8859 }
8860
8861 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
8862 rx_alarm_status);
8863 /* No need to check link status in case of module plugged in/out */
8864}
8865
8866static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
8867 struct link_params *params,
8868 struct link_vars *vars)
8869
8870{
8871 struct bnx2x *bp = params->bp;
8872 u8 link_up = 0, oc_port = params->port;
8873 u16 link_status = 0;
8874 u16 rx_alarm_status, lasi_ctrl, val1;
8875
8876 /* If PHY is not initialized, do not check link status */
8877 bnx2x_cl45_read(bp, phy,
8878 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
8879 &lasi_ctrl);
8880 if (!lasi_ctrl)
8881 return 0;
8882
8883 /* Check the LASI on Rx */
8884 bnx2x_cl45_read(bp, phy,
8885 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT,
8886 &rx_alarm_status);
8887 vars->line_speed = 0;
8888 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status);
8889
8890 bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
8891 MDIO_PMA_LASI_TXCTRL);
8892
8893 bnx2x_cl45_read(bp, phy,
8894 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
8895
8896 DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
8897
8898 /* Clear MSG-OUT */
8899 bnx2x_cl45_read(bp, phy,
8900 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
8901
8902 /*
8903	 * If a module is present (and this is not the NOC flavor),
8904	 * check it for over-current
8905 */
8906 if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
8907 /* Check over-current using 8727 GPIO0 input*/
8908 bnx2x_cl45_read(bp, phy,
8909 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
8910 &val1);
8911
8912 if ((val1 & (1<<8)) == 0) {
8913 if (!CHIP_IS_E1x(bp))
8914 oc_port = BP_PATH(bp) + (params->port << 1);
8915 DP(NETIF_MSG_LINK, "8727 Power fault has been detected"
8916 " on port %d\n", oc_port);
8917 netdev_err(bp->dev, "Error: Power fault on Port %d has"
8918 " been detected and the power to "
8919 "that SFP+ module has been removed"
8920 " to prevent failure of the card."
8921 " Please remove the SFP+ module and"
8922 " restart the system to clear this"
8923 " error.\n",
8924 oc_port);
8925 /* Disable all RX_ALARMs except for mod_abs */
8926 bnx2x_cl45_write(bp, phy,
8927 MDIO_PMA_DEVAD,
8928 MDIO_PMA_LASI_RXCTRL, (1<<5));
8929
8930 bnx2x_cl45_read(bp, phy,
8931 MDIO_PMA_DEVAD,
8932 MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
8933 /* Wait for module_absent_event */
8934 val1 |= (1<<8);
8935 bnx2x_cl45_write(bp, phy,
8936 MDIO_PMA_DEVAD,
8937 MDIO_PMA_REG_PHY_IDENTIFIER, val1);
8938 /* Clear RX alarm */
8939 bnx2x_cl45_read(bp, phy,
8940 MDIO_PMA_DEVAD,
8941 MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
8942 return 0;
8943 }
8944 } /* Over current check */
8945
8946 /* When module absent bit is set, check module */
8947 if (rx_alarm_status & (1<<5)) {
8948 bnx2x_8727_handle_mod_abs(phy, params);
8949 /* Enable all mod_abs and link detection bits */
8950 bnx2x_cl45_write(bp, phy,
8951 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
8952 ((1<<5) | (1<<2)));
8953 }
8954 DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
8955 bnx2x_8727_specific_func(phy, params, ENABLE_TX);
8956 /* If transmitter is disabled, ignore false link up indication */
8957 bnx2x_cl45_read(bp, phy,
8958 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
8959 if (val1 & (1<<15)) {
8960 DP(NETIF_MSG_LINK, "Tx is disabled\n");
8961 return 0;
8962 }
8963
8964 bnx2x_cl45_read(bp, phy,
8965 MDIO_PMA_DEVAD,
8966 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
8967
8968 /*
8969 * Bits 0..2 --> speed detected,
8970	 * Bits 13..15 --> link is down
8971 */
8972 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
8973 link_up = 1;
8974 vars->line_speed = SPEED_10000;
8975 DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
8976 params->port);
8977 } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
8978 link_up = 1;
8979 vars->line_speed = SPEED_1000;
8980 DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
8981 params->port);
8982 } else {
8983 link_up = 0;
8984 DP(NETIF_MSG_LINK, "port %x: External link is down\n",
8985 params->port);
8986 }
8987
8988 /* Capture 10G link fault. */
8989 if (vars->line_speed == SPEED_10000) {
8990 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
8991 MDIO_PMA_LASI_TXSTAT, &val1);
8992
8993 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
8994 MDIO_PMA_LASI_TXSTAT, &val1);
8995
8996 if (val1 & (1<<0)) {
8997 vars->fault_detected = 1;
8998 }
8999 }
9000
9001 if (link_up) {
9002 bnx2x_ext_phy_resolve_fc(phy, params, vars);
9003 vars->duplex = DUPLEX_FULL;
9004 DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
9005 }
9006
9007 if ((DUAL_MEDIA(params)) &&
9008 (phy->req_line_speed == SPEED_1000)) {
9009 bnx2x_cl45_read(bp, phy,
9010 MDIO_PMA_DEVAD,
9011 MDIO_PMA_REG_8727_PCS_GP, &val1);
9012 /*
9013 * In case of dual-media board and 1G, power up the XAUI side,
9014 * otherwise power it down. For 10G it is done automatically
9015 */
9016 if (link_up)
9017 val1 &= ~(3<<10);
9018 else
9019 val1 |= (3<<10);
9020 bnx2x_cl45_write(bp, phy,
9021 MDIO_PMA_DEVAD,
9022 MDIO_PMA_REG_8727_PCS_GP, val1);
9023 }
9024 return link_up;
9025}
9026
9027static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
9028 struct link_params *params)
9029{
9030 struct bnx2x *bp = params->bp;
9031
9032 /* Enable/Disable PHY transmitter output */
9033 bnx2x_set_disable_pmd_transmit(params, phy, 1);
9034
9035 /* Disable Transmitter */
9036 bnx2x_sfp_set_transmitter(params, phy, 0);
9037 /* Clear LASI */
9038 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0);
9039
9040}
9041
9042/******************************************************************/
9043/* BCM8481/BCM84823/BCM84833 PHY SECTION */
9044/******************************************************************/
9045static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9046 struct link_params *params)
9047{
9048 u16 val, fw_ver1, fw_ver2, cnt;
9049 u8 port;
9050 struct bnx2x *bp = params->bp;
9051
9052 port = params->port;
9053
9054	/* The 32-bit registers in the 848xx are accessed via the MDIO2ARM interface. */
9055	/* (1) Set register 0xc200_0014 (SPI_BRIDGE_CTRL_2) to 0x03000000 */
9056 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
9057 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
9058 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
9059 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
9060 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
9061
9062 for (cnt = 0; cnt < 100; cnt++) {
9063 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
9064 if (val & 1)
9065 break;
9066 udelay(5);
9067 }
9068 if (cnt == 100) {
9069 DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
9070 bnx2x_save_spirom_version(bp, port, 0,
9071 phy->ver_addr);
9072 return;
9073 }
9074
9075
9076	/* (2) Read register 0xc200_0000 (SPI_FW_STATUS) */
9077 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
9078 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
9079 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
9080 for (cnt = 0; cnt < 100; cnt++) {
9081 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
9082 if (val & 1)
9083 break;
9084 udelay(5);
9085 }
9086 if (cnt == 100) {
9087 DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
9088 bnx2x_save_spirom_version(bp, port, 0,
9089 phy->ver_addr);
9090 return;
9091 }
9092
9093 /* lower 16 bits of the register SPI_FW_STATUS */
9094 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
9095 /* upper 16 bits of register SPI_FW_STATUS */
9096 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
9097
9098 bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
9099 phy->ver_addr);
9100}
9101
9102static void bnx2x_848xx_set_led(struct bnx2x *bp,
9103 struct bnx2x_phy *phy)
9104{
9105 u16 val;
9106
9107 /* PHYC_CTL_LED_CTL */
9108 bnx2x_cl45_read(bp, phy,
9109 MDIO_PMA_DEVAD,
9110 MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
9111 val &= 0xFE00;
9112 val |= 0x0092;
9113
9114 bnx2x_cl45_write(bp, phy,
9115 MDIO_PMA_DEVAD,
9116 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
9117
9118 bnx2x_cl45_write(bp, phy,
9119 MDIO_PMA_DEVAD,
9120 MDIO_PMA_REG_8481_LED1_MASK,
9121 0x80);
9122
9123 bnx2x_cl45_write(bp, phy,
9124 MDIO_PMA_DEVAD,
9125 MDIO_PMA_REG_8481_LED2_MASK,
9126 0x18);
9127
9128 /* Select activity source by Tx and Rx, as suggested by PHY AE */
9129 bnx2x_cl45_write(bp, phy,
9130 MDIO_PMA_DEVAD,
9131 MDIO_PMA_REG_8481_LED3_MASK,
9132 0x0006);
9133
9134 /* Select the closest activity blink rate to that in 10/100/1000 */
9135 bnx2x_cl45_write(bp, phy,
9136 MDIO_PMA_DEVAD,
9137 MDIO_PMA_REG_8481_LED3_BLINK,
9138 0);
9139
9140 bnx2x_cl45_read(bp, phy,
9141 MDIO_PMA_DEVAD,
9142 MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
9143 val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
9144
9145 bnx2x_cl45_write(bp, phy,
9146 MDIO_PMA_DEVAD,
9147 MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
9148
9149 /* 'Interrupt Mask' */
9150 bnx2x_cl45_write(bp, phy,
9151 MDIO_AN_DEVAD,
9152 0xFFFB, 0xFFFD);
9153}
9154
9155static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9156 struct link_params *params,
9157 struct link_vars *vars)
9158{
9159 struct bnx2x *bp = params->bp;
9160 u16 autoneg_val, an_1000_val, an_10_100_val;
9161 u16 tmp_req_line_speed;
9162
9163 tmp_req_line_speed = phy->req_line_speed;
9164 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
9165 if (phy->req_line_speed == SPEED_10000)
9166 phy->req_line_speed = SPEED_AUTO_NEG;
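	/*
	 * For the 84833, a forced 10G request is handled via autoneg; the
	 * original request is restored from tmp_req_line_speed at the end
	 * of this function.
	 */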
9167
9168 /*
9169	 * This PHY uses the NIG latch mechanism since the link indication
9170	 * arrives through its LED4 and not via its LASI signal, so we
9171	 * get a steady signal instead of clear-on-read
9172 */
9173 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
9174 1 << NIG_LATCH_BC_ENABLE_MI_INT);
9175
9176 bnx2x_cl45_write(bp, phy,
9177 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
9178
9179 bnx2x_848xx_set_led(bp, phy);
9180
9181 /* set 1000 speed advertisement */
9182 bnx2x_cl45_read(bp, phy,
9183 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
9184 &an_1000_val);
9185
9186 bnx2x_ext_phy_set_pause(params, phy, vars);
9187 bnx2x_cl45_read(bp, phy,
9188 MDIO_AN_DEVAD,
9189 MDIO_AN_REG_8481_LEGACY_AN_ADV,
9190 &an_10_100_val);
9191 bnx2x_cl45_read(bp, phy,
9192 MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
9193 &autoneg_val);
9194 /* Disable forced speed */
9195 autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
9196 an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8));
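	/*
	 * In the legacy AN advertisement, bits 5/6 advertise 10M half/full
	 * and bits 7/8 advertise 100M half/full; in the MII control
	 * register, bits 6/13 force speed, bit 8 duplex, bit 9 restarts
	 * autoneg and bit 12 enables it.
	 */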
9197
9198 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
9199 (phy->speed_cap_mask &
9200 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
9201 (phy->req_line_speed == SPEED_1000)) {
9202 an_1000_val |= (1<<8);
9203 autoneg_val |= (1<<9 | 1<<12);
9204 if (phy->req_duplex == DUPLEX_FULL)
9205 an_1000_val |= (1<<9);
9206 DP(NETIF_MSG_LINK, "Advertising 1G\n");
9207 } else
9208 an_1000_val &= ~((1<<8) | (1<<9));
9209
9210 bnx2x_cl45_write(bp, phy,
9211 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
9212 an_1000_val);
9213
9214 /* set 100 speed advertisement */
9215 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
9216 (phy->speed_cap_mask &
9217 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
9218 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) &&
9219 (phy->supported &
9220 (SUPPORTED_100baseT_Half |
9221 SUPPORTED_100baseT_Full)))) {
9222 an_10_100_val |= (1<<7);
9223 /* Enable autoneg and restart autoneg for legacy speeds */
9224 autoneg_val |= (1<<9 | 1<<12);
9225
9226 if (phy->req_duplex == DUPLEX_FULL)
9227 an_10_100_val |= (1<<8);
9228 DP(NETIF_MSG_LINK, "Advertising 100M\n");
9229 }
9230 /* set 10 speed advertisement */
9231 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
9232 (phy->speed_cap_mask &
9233 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
9234 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) &&
9235 (phy->supported &
9236 (SUPPORTED_10baseT_Half |
9237 SUPPORTED_10baseT_Full)))) {
9238 an_10_100_val |= (1<<5);
9239 autoneg_val |= (1<<9 | 1<<12);
9240 if (phy->req_duplex == DUPLEX_FULL)
9241 an_10_100_val |= (1<<6);
9242 DP(NETIF_MSG_LINK, "Advertising 10M\n");
9243 }
9244
9245 /* Only 10/100 are allowed to work in FORCE mode */
9246 if ((phy->req_line_speed == SPEED_100) &&
9247 (phy->supported &
9248 (SUPPORTED_100baseT_Half |
9249 SUPPORTED_100baseT_Full))) {
9250 autoneg_val |= (1<<13);
9251		/* Enable AUTO-MDIX when autoneg is disabled */
9252 bnx2x_cl45_write(bp, phy,
9253 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
9254 (1<<15 | 1<<9 | 7<<0));
9255 DP(NETIF_MSG_LINK, "Setting 100M force\n");
9256 }
9257 if ((phy->req_line_speed == SPEED_10) &&
9258 (phy->supported &
9259 (SUPPORTED_10baseT_Half |
9260 SUPPORTED_10baseT_Full))) {
9261		/* Enable AUTO-MDIX when autoneg is disabled */
9262 bnx2x_cl45_write(bp, phy,
9263 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
9264 (1<<15 | 1<<9 | 7<<0));
9265 DP(NETIF_MSG_LINK, "Setting 10M force\n");
9266 }
9267
9268 bnx2x_cl45_write(bp, phy,
9269 MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
9270 an_10_100_val);
9271
9272 if (phy->req_duplex == DUPLEX_FULL)
9273 autoneg_val |= (1<<8);
9274
9275 /*
9276 * Always write this if this is not 84833.
9277 * For 84833, write it only when it's a forced speed.
9278 */
9279 if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
9280 ((autoneg_val & (1<<12)) == 0))
9281 bnx2x_cl45_write(bp, phy,
9282 MDIO_AN_DEVAD,
9283 MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
9284
9285 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
9286 (phy->speed_cap_mask &
9287 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
9288 (phy->req_line_speed == SPEED_10000)) {
9289 DP(NETIF_MSG_LINK, "Advertising 10G\n");
9290		/* Restart autoneg for 10G */
9291
9292 bnx2x_cl45_write(bp, phy,
9293 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
9294 0x3200);
9295 } else
9296 bnx2x_cl45_write(bp, phy,
9297 MDIO_AN_DEVAD,
9298 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
9299 1);
9300
9301 /* Save spirom version */
9302 bnx2x_save_848xx_spirom_version(phy, params);
9303
9304 phy->req_line_speed = tmp_req_line_speed;
9305
9306 return 0;
9307}
9308
9309static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
9310 struct link_params *params,
9311 struct link_vars *vars)
9312{
9313 struct bnx2x *bp = params->bp;
9314 /* Restore normal power mode*/
9315 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
9316 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
9317
9318 /* HW reset */
9319 bnx2x_ext_phy_hw_reset(bp, params->port);
9320 bnx2x_wait_reset_complete(bp, phy, params);
9321
9322 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
9323 return bnx2x_848xx_cmn_config_init(phy, params, vars);
9324}
9325
9326
9327#define PHY84833_HDSHK_WAIT 300
9328static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
9329 struct link_params *params,
9330 struct link_vars *vars)
9331{
9332 u32 idx;
9333 u32 pair_swap;
9334 u16 val;
9335 u16 data;
9336 struct bnx2x *bp = params->bp;
9337 /* Do pair swap */
9338
9339	/* Check whether pair swap is configured in shmem. */
9340 pair_swap = REG_RD(bp, params->shmem_base +
9341 offsetof(struct shmem_region,
9342 dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
9343 PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
9344
9345 if (pair_swap == 0)
9346 return 0;
9347
9348 data = (u16)pair_swap;
9349
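	/*
	 * Firmware mailbox handshake, also used for AutogrEEEn in
	 * bnx2x_848x3_config_init(): request access by writing
	 * CMD_OPEN_OVERRIDE to SCRATCH_REG2 and polling for
	 * CMD_OPEN_FOR_CMDS, place the argument in SCRATCH_REG4, issue the
	 * command through SCRATCH_REG0, poll SCRATCH_REG2 for
	 * CMD_COMPLETE_PASS/ERROR, then write CMD_CLEAR_COMPLETE.
	 */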
9350 /* Write CMD_OPEN_OVERRIDE to STATUS reg */
9351 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9352 MDIO_84833_TOP_CFG_SCRATCH_REG2,
9353 PHY84833_CMD_OPEN_OVERRIDE);
9354 for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
9355 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
9356 MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
9357 if (val == PHY84833_CMD_OPEN_FOR_CMDS)
9358 break;
9359 msleep(1);
9360 }
9361 if (idx >= PHY84833_HDSHK_WAIT) {
9362 DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
9363 return -EINVAL;
9364 }
9365
9366 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9367 MDIO_84833_TOP_CFG_SCRATCH_REG4,
9368 data);
9369 /* Issue pair swap command */
9370 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9371 MDIO_84833_TOP_CFG_SCRATCH_REG0,
9372 PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
9373 for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
9374 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
9375 MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
9376 if ((val == PHY84833_CMD_COMPLETE_PASS) ||
9377 (val == PHY84833_CMD_COMPLETE_ERROR))
9378 break;
9379 msleep(1);
9380 }
9381 if ((idx >= PHY84833_HDSHK_WAIT) ||
9382 (val == PHY84833_CMD_COMPLETE_ERROR)) {
9383 DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
9384 return -EINVAL;
9385 }
9386 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9387 MDIO_84833_TOP_CFG_SCRATCH_REG2,
9388 PHY84833_CMD_CLEAR_COMPLETE);
9389 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
9390 return 0;
9391}
9392
9393
9394static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
9395 u32 shmem_base_path[],
9396 u32 chip_id)
9397{
9398 u32 reset_pin[2];
9399 u32 idx;
9400 u8 reset_gpios;
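	/*
	 * The reset pin is read from both paths' shmem copies and the two
	 * pin masks are OR-ed together, presumably so that a single reset
	 * pulse reaches the 84833 of either path.
	 */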
9401 if (CHIP_IS_E3(bp)) {
9402 /* Assume that these will be GPIOs, not EPIOs. */
9403 for (idx = 0; idx < 2; idx++) {
9404 /* Map config param to register bit. */
9405 reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
9406 offsetof(struct shmem_region,
9407 dev_info.port_hw_config[0].e3_cmn_pin_cfg));
9408 reset_pin[idx] = (reset_pin[idx] &
9409 PORT_HW_CFG_E3_PHY_RESET_MASK) >>
9410 PORT_HW_CFG_E3_PHY_RESET_SHIFT;
9411 reset_pin[idx] -= PIN_CFG_GPIO0_P0;
9412 reset_pin[idx] = (1 << reset_pin[idx]);
9413 }
9414 reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
9415 } else {
9416		/* E2: read from a different place in shmem. */
9417 for (idx = 0; idx < 2; idx++) {
9418 reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
9419 offsetof(struct shmem_region,
9420 dev_info.port_hw_config[0].default_cfg));
9421 reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK;
9422 reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0;
9423 reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT;
9424 reset_pin[idx] = (1 << reset_pin[idx]);
9425 }
9426 reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
9427 }
9428
9429 return reset_gpios;
9430}
9431
9432static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
9433 struct link_params *params)
9434{
9435 struct bnx2x *bp = params->bp;
9436 u8 reset_gpios;
9437 u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base +
9438 offsetof(struct shmem2_region,
9439 other_shmem_base_addr));
9440
9441 u32 shmem_base_path[2];
9442 shmem_base_path[0] = params->shmem_base;
9443 shmem_base_path[1] = other_shmem_base_addr;
9444
9445 reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path,
9446 params->chip_id);
9447
9448 bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
9449 udelay(10);
9450 DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n",
9451 reset_gpios);
9452
9453 return 0;
9454}
9455
9456static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
9457 u32 shmem_base_path[],
9458 u32 chip_id)
9459{
9460 u8 reset_gpios;
9461
9462 reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
9463
9464 bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
9465 udelay(10);
9466 bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
9467 msleep(800);
9468 DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
9469 reset_gpios);
9470
9471 return 0;
9472}
9473
9474#define PHY84833_CONSTANT_LATENCY 1193
9475static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9476 struct link_params *params,
9477 struct link_vars *vars)
9478{
9479 struct bnx2x *bp = params->bp;
9480 u8 port, initialize = 1;
9481 u16 val;
9482 u16 temp;
9483 u32 actual_phy_selection, cms_enable, idx;
9484 int rc = 0;
9485
9486 msleep(1);
9487
9488 if (!(CHIP_IS_E1(bp)))
9489 port = BP_PATH(bp);
9490 else
9491 port = params->port;
9492
9493 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
9494 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
9495 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
9496 port);
9497 } else {
9498 /* MDIO reset */
9499 bnx2x_cl45_write(bp, phy,
9500 MDIO_PMA_DEVAD,
9501 MDIO_PMA_REG_CTRL, 0x8000);
9502 /* Bring PHY out of super isolate mode */
9503 bnx2x_cl45_read(bp, phy,
9504 MDIO_CTL_DEVAD,
9505 MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
9506 val &= ~MDIO_84833_SUPER_ISOLATE;
9507 bnx2x_cl45_write(bp, phy,
9508 MDIO_CTL_DEVAD,
9509 MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
9510 }
9511
9512 bnx2x_wait_reset_complete(bp, phy, params);
9513
9514 /* Wait for GPHY to come out of reset */
9515 msleep(50);
9516
9517 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
9518 bnx2x_84833_pair_swap_cfg(phy, params, vars);
9519
9520 /*
9521	 * The BCM84823 requires the XGXS link to come up first at 10G for normal behavior
9522 */
9523 temp = vars->line_speed;
9524 vars->line_speed = SPEED_10000;
9525 bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
9526 bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
9527 vars->line_speed = temp;
9528
9529 /* Set dual-media configuration according to configuration */
9530
9531 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
9532 MDIO_CTL_REG_84823_MEDIA, &val);
9533 val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
9534 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
9535 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
9536 MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
9537 MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
9538
9539 if (CHIP_IS_E3(bp)) {
9540 val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
9541 MDIO_CTL_REG_84823_MEDIA_LINE_MASK);
9542 } else {
9543 val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI |
9544 MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L);
9545 }
9546
9547 actual_phy_selection = bnx2x_phy_selection(params);
9548
9549 switch (actual_phy_selection) {
9550 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
9551		/* Do nothing. Essentially this is like the copper-priority case */
9552 break;
9553 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
9554 val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
9555 break;
9556 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
9557 val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
9558 break;
9559 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
9560 /* Do nothing here. The first PHY won't be initialized at all */
9561 break;
9562 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
9563 val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
9564 initialize = 0;
9565 break;
9566 }
9567 if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000)
9568 val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
9569
9570 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9571 MDIO_CTL_REG_84823_MEDIA, val);
9572 DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
9573 params->multi_phy_config, val);
9574
9575 /* AutogrEEEn */
9576 if (params->feature_config_flags &
9577 FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
9578 /* Ensure that f/w is ready */
9579 for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
9580 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
9581 MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
9582 if (val == PHY84833_CMD_OPEN_FOR_CMDS)
9583 break;
9584 usleep_range(1000, 1000);
9585 }
9586 if (idx >= PHY84833_HDSHK_WAIT) {
9587 DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n");
9588 return -EINVAL;
9589 }
9590
9591 /* Select EEE mode */
9592 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9593 MDIO_84833_TOP_CFG_SCRATCH_REG3,
9594 0x2);
9595
9596 /* Set Idle and Latency */
9597 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9598 MDIO_84833_TOP_CFG_SCRATCH_REG4,
9599 PHY84833_CONSTANT_LATENCY + 1);
9600
9601 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9602 MDIO_84833_TOP_CFG_DATA3_REG,
9603 PHY84833_CONSTANT_LATENCY + 1);
9604
9605 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9606 MDIO_84833_TOP_CFG_DATA4_REG,
9607 PHY84833_CONSTANT_LATENCY);
9608
9609 /* Send EEE instruction to command register */
9610 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9611 MDIO_84833_TOP_CFG_SCRATCH_REG0,
9612 PHY84833_DIAG_CMD_SET_EEE_MODE);
9613
9614 /* Ensure that the command has completed */
9615 for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
9616 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
9617 MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
9618 if ((val == PHY84833_CMD_COMPLETE_PASS) ||
9619 (val == PHY84833_CMD_COMPLETE_ERROR))
9620 break;
9621 usleep_range(1000, 1000);
9622 }
9623 if ((idx >= PHY84833_HDSHK_WAIT) ||
9624 (val == PHY84833_CMD_COMPLETE_ERROR)) {
9625 DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
9626 return -EINVAL;
9627 }
9628
9629 /* Reset command handler */
9630 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9631 MDIO_84833_TOP_CFG_SCRATCH_REG2,
9632 PHY84833_CMD_CLEAR_COMPLETE);
9633 }
9634
9635 if (initialize)
9636 rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
9637 else
9638 bnx2x_save_848xx_spirom_version(phy, params);
9639	/* The 84833 PHY has a better mechanism and doesn't need CMS configuration. */
9640 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
9641 cms_enable = REG_RD(bp, params->shmem_base +
9642 offsetof(struct shmem_region,
9643 dev_info.port_hw_config[params->port].default_cfg)) &
9644 PORT_HW_CFG_ENABLE_CMS_MASK;
9645
9646 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
9647 MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
9648 if (cms_enable)
9649 val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
9650 else
9651 val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
9652 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9653 MDIO_CTL_REG_84823_USER_CTRL_REG, val);
9654 }
9655
9656 return rc;
9657}
9658
9659static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
9660 struct link_params *params,
9661 struct link_vars *vars)
9662{
9663 struct bnx2x *bp = params->bp;
9664 u16 val, val1, val2;
9665 u8 link_up = 0;
9666
9667
9668 /* Check 10G-BaseT link status */
9669 /* Check PMD signal ok */
9670 bnx2x_cl45_read(bp, phy,
9671 MDIO_AN_DEVAD, 0xFFFA, &val1);
9672 bnx2x_cl45_read(bp, phy,
9673 MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
9674 &val2);
9675 DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
9676
9677 /* Check link 10G */
9678 if (val2 & (1<<11)) {
9679 vars->line_speed = SPEED_10000;
9680 vars->duplex = DUPLEX_FULL;
9681 link_up = 1;
9682 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
9683 } else { /* Check Legacy speed link */
9684 u16 legacy_status, legacy_speed;
9685
9686 /* Enable expansion register 0x42 (Operation mode status) */
9687 bnx2x_cl45_write(bp, phy,
9688 MDIO_AN_DEVAD,
9689 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42);
9690
9691 /* Get legacy speed operation status */
9692 bnx2x_cl45_read(bp, phy,
9693 MDIO_AN_DEVAD,
9694 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
9695 &legacy_status);
9696
9697 DP(NETIF_MSG_LINK, "Legacy speed status"
9698 " = 0x%x\n", legacy_status);
9699 link_up = ((legacy_status & (1<<11)) == (1<<11));
9700 if (link_up) {
9701 legacy_speed = (legacy_status & (3<<9));
9702 if (legacy_speed == (0<<9))
9703 vars->line_speed = SPEED_10;
9704 else if (legacy_speed == (1<<9))
9705 vars->line_speed = SPEED_100;
9706 else if (legacy_speed == (2<<9))
9707 vars->line_speed = SPEED_1000;
9708 else /* Should not happen */
9709 vars->line_speed = 0;
9710
9711 if (legacy_status & (1<<8))
9712 vars->duplex = DUPLEX_FULL;
9713 else
9714 vars->duplex = DUPLEX_HALF;
9715
9716 DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
9717 " is_duplex_full= %d\n", vars->line_speed,
9718 (vars->duplex == DUPLEX_FULL));
9719 /* Check legacy speed AN resolution */
9720 bnx2x_cl45_read(bp, phy,
9721 MDIO_AN_DEVAD,
9722 MDIO_AN_REG_8481_LEGACY_MII_STATUS,
9723 &val);
9724 if (val & (1<<5))
9725 vars->link_status |=
9726 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
9727 bnx2x_cl45_read(bp, phy,
9728 MDIO_AN_DEVAD,
9729 MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
9730 &val);
9731 if ((val & (1<<0)) == 0)
9732 vars->link_status |=
9733 LINK_STATUS_PARALLEL_DETECTION_USED;
9734 }
9735 }
9736 if (link_up) {
9737 DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
9738 vars->line_speed);
9739 bnx2x_ext_phy_resolve_fc(phy, params, vars);
9740 }
9741
9742 return link_up;
9743}
9744
9745
9746static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
9747{
9748 int status = 0;
9749 u32 spirom_ver;
9750 spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
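	/*
	 * Repack the 848xx version word: bits 7..11 (presumably the major
	 * number) move to the upper half-word and bits 0..6 (the minor
	 * number) stay in the lower, matching the layout
	 * bnx2x_format_ver() expects.
	 */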
9751 status = bnx2x_format_ver(spirom_ver, str, len);
9752 return status;
9753}
9754
9755static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
9756 struct link_params *params)
9757{
9758 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
9759 MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
9760 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
9761 MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
9762}
9763
9764static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
9765 struct link_params *params)
9766{
9767 bnx2x_cl45_write(params->bp, phy,
9768 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
9769 bnx2x_cl45_write(params->bp, phy,
9770 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
9771}
9772
9773static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
9774 struct link_params *params)
9775{
9776 struct bnx2x *bp = params->bp;
9777 u8 port;
9778 u16 val16;
9779
9780 if (!(CHIP_IS_E1(bp)))
9781 port = BP_PATH(bp);
9782 else
9783 port = params->port;
9784
9785 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
9786 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
9787 MISC_REGISTERS_GPIO_OUTPUT_LOW,
9788 port);
9789 } else {
9790 bnx2x_cl45_read(bp, phy,
9791 MDIO_CTL_DEVAD,
9792 0x400f, &val16);
9793 bnx2x_cl45_write(bp, phy,
9794 MDIO_PMA_DEVAD,
9795 MDIO_PMA_REG_CTRL, 0x800);
9796 }
9797}
9798
9799static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
9800 struct link_params *params, u8 mode)
9801{
9802 struct bnx2x *bp = params->bp;
9803 u16 val;
9804 u8 port;
9805
9806 if (!(CHIP_IS_E1(bp)))
9807 port = BP_PATH(bp);
9808 else
9809 port = params->port;
9810
9811 switch (mode) {
9812 case LED_MODE_OFF:
9813
9814 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port);
9815
9816 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
9817 SHARED_HW_CFG_LED_EXTPHY1) {
9818
9819 /* Set LED masks */
9820 bnx2x_cl45_write(bp, phy,
9821 MDIO_PMA_DEVAD,
9822 MDIO_PMA_REG_8481_LED1_MASK,
9823 0x0);
9824
9825 bnx2x_cl45_write(bp, phy,
9826 MDIO_PMA_DEVAD,
9827 MDIO_PMA_REG_8481_LED2_MASK,
9828 0x0);
9829
9830 bnx2x_cl45_write(bp, phy,
9831 MDIO_PMA_DEVAD,
9832 MDIO_PMA_REG_8481_LED3_MASK,
9833 0x0);
9834
9835 bnx2x_cl45_write(bp, phy,
9836 MDIO_PMA_DEVAD,
9837 MDIO_PMA_REG_8481_LED5_MASK,
9838 0x0);
9839
9840 } else {
9841 bnx2x_cl45_write(bp, phy,
9842 MDIO_PMA_DEVAD,
9843 MDIO_PMA_REG_8481_LED1_MASK,
9844 0x0);
9845 }
9846 break;
9847 case LED_MODE_FRONT_PANEL_OFF:
9848
9849 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
9850 port);
9851
9852 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
9853 SHARED_HW_CFG_LED_EXTPHY1) {
9854
9855 /* Set LED masks */
9856 bnx2x_cl45_write(bp, phy,
9857 MDIO_PMA_DEVAD,
9858 MDIO_PMA_REG_8481_LED1_MASK,
9859 0x0);
9860
9861 bnx2x_cl45_write(bp, phy,
9862 MDIO_PMA_DEVAD,
9863 MDIO_PMA_REG_8481_LED2_MASK,
9864 0x0);
9865
9866 bnx2x_cl45_write(bp, phy,
9867 MDIO_PMA_DEVAD,
9868 MDIO_PMA_REG_8481_LED3_MASK,
9869 0x0);
9870
9871 bnx2x_cl45_write(bp, phy,
9872 MDIO_PMA_DEVAD,
9873 MDIO_PMA_REG_8481_LED5_MASK,
9874 0x20);
9875
9876 } else {
9877 bnx2x_cl45_write(bp, phy,
9878 MDIO_PMA_DEVAD,
9879 MDIO_PMA_REG_8481_LED1_MASK,
9880 0x0);
9881 }
9882 break;
9883 case LED_MODE_ON:
9884
9885 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port);
9886
9887 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
9888 SHARED_HW_CFG_LED_EXTPHY1) {
9889 /* Set control reg */
9890 bnx2x_cl45_read(bp, phy,
9891 MDIO_PMA_DEVAD,
9892 MDIO_PMA_REG_8481_LINK_SIGNAL,
9893 &val);
9894 val &= 0x8000;
9895 val |= 0x2492;
9896
9897 bnx2x_cl45_write(bp, phy,
9898 MDIO_PMA_DEVAD,
9899 MDIO_PMA_REG_8481_LINK_SIGNAL,
9900 val);
9901
9902 /* Set LED masks */
9903 bnx2x_cl45_write(bp, phy,
9904 MDIO_PMA_DEVAD,
9905 MDIO_PMA_REG_8481_LED1_MASK,
9906 0x0);
9907
9908 bnx2x_cl45_write(bp, phy,
9909 MDIO_PMA_DEVAD,
9910 MDIO_PMA_REG_8481_LED2_MASK,
9911 0x20);
9912
9913 bnx2x_cl45_write(bp, phy,
9914 MDIO_PMA_DEVAD,
9915 MDIO_PMA_REG_8481_LED3_MASK,
9916 0x20);
9917
9918 bnx2x_cl45_write(bp, phy,
9919 MDIO_PMA_DEVAD,
9920 MDIO_PMA_REG_8481_LED5_MASK,
9921 0x0);
9922 } else {
9923 bnx2x_cl45_write(bp, phy,
9924 MDIO_PMA_DEVAD,
9925 MDIO_PMA_REG_8481_LED1_MASK,
9926 0x20);
9927 }
9928 break;
9929
9930 case LED_MODE_OPER:
9931
9932 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port);
9933
9934 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
9935 SHARED_HW_CFG_LED_EXTPHY1) {
9936
9937 /* Set control reg */
9938 bnx2x_cl45_read(bp, phy,
9939 MDIO_PMA_DEVAD,
9940 MDIO_PMA_REG_8481_LINK_SIGNAL,
9941 &val);
9942
9943 if (!((val &
9944 MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
9945 >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
9946 DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
9947 bnx2x_cl45_write(bp, phy,
9948 MDIO_PMA_DEVAD,
9949 MDIO_PMA_REG_8481_LINK_SIGNAL,
9950 0xa492);
9951 }
9952
9953 /* Set LED masks */
9954 bnx2x_cl45_write(bp, phy,
9955 MDIO_PMA_DEVAD,
9956 MDIO_PMA_REG_8481_LED1_MASK,
9957 0x10);
9958
9959 bnx2x_cl45_write(bp, phy,
9960 MDIO_PMA_DEVAD,
9961 MDIO_PMA_REG_8481_LED2_MASK,
9962 0x80);
9963
9964 bnx2x_cl45_write(bp, phy,
9965 MDIO_PMA_DEVAD,
9966 MDIO_PMA_REG_8481_LED3_MASK,
9967 0x98);
9968
9969 bnx2x_cl45_write(bp, phy,
9970 MDIO_PMA_DEVAD,
9971 MDIO_PMA_REG_8481_LED5_MASK,
9972 0x40);
9973
9974 } else {
9975 bnx2x_cl45_write(bp, phy,
9976 MDIO_PMA_DEVAD,
9977 MDIO_PMA_REG_8481_LED1_MASK,
9978 0x80);
9979
9980 /* Tell LED3 to blink on source */
9981 bnx2x_cl45_read(bp, phy,
9982 MDIO_PMA_DEVAD,
9983 MDIO_PMA_REG_8481_LINK_SIGNAL,
9984 &val);
9985 val &= ~(7<<6);
9986 val |= (1<<6); /* A83B[8:6]= 1 */
9987 bnx2x_cl45_write(bp, phy,
9988 MDIO_PMA_DEVAD,
9989 MDIO_PMA_REG_8481_LINK_SIGNAL,
9990 val);
9991 }
9992 break;
9993 }
9994
9995 /*
9996 * This is a workaround for E3+84833 until autoneg
9997 * restart is fixed in f/w
9998 */
9999 if (CHIP_IS_E3(bp)) {
10000 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
10001 MDIO_WC_REG_GP2_STATUS_GP_2_1, &val);
10002 }
10003}
10004
10005/******************************************************************/
10006/* 54618SE PHY SECTION */
10007/******************************************************************/
10008static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10009 struct link_params *params,
10010 struct link_vars *vars)
10011{
10012 struct bnx2x *bp = params->bp;
10013 u8 port;
10014 u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp;
10015 u32 cfg_pin;
10016
10017 DP(NETIF_MSG_LINK, "54618SE cfg init\n");
10018 usleep_range(1000, 1000);
10019
10020 /* This works with E3 only, no need to check the chip
10021 before determining the port. */
10022 port = params->port;
10023
10024 cfg_pin = (REG_RD(bp, params->shmem_base +
10025 offsetof(struct shmem_region,
10026 dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
10027 PORT_HW_CFG_E3_PHY_RESET_MASK) >>
10028 PORT_HW_CFG_E3_PHY_RESET_SHIFT;
10029
10030 /* Drive pin high to bring the GPHY out of reset. */
10031 bnx2x_set_cfg_pin(bp, cfg_pin, 1);
10032
10033	/* Wait for GPHY to reset */
10034 msleep(50);
10035
10036 /* reset phy */
10037 bnx2x_cl22_write(bp, phy,
10038 MDIO_PMA_REG_CTRL, 0x8000);
10039 bnx2x_wait_reset_complete(bp, phy, params);
10040
10041	/* Wait for GPHY to reset */
10042 msleep(50);
10043
10044 /* Configure LED4: set to INTR (0x6). */
10045 /* Accessing shadow register 0xe. */
10046 bnx2x_cl22_write(bp, phy,
10047 MDIO_REG_GPHY_SHADOW,
10048 MDIO_REG_GPHY_SHADOW_LED_SEL2);
10049 bnx2x_cl22_read(bp, phy,
10050 MDIO_REG_GPHY_SHADOW,
10051 &temp);
10052 temp &= ~(0xf << 4);
10053 temp |= (0x6 << 4);
10054 bnx2x_cl22_write(bp, phy,
10055 MDIO_REG_GPHY_SHADOW,
10056 MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
10057 /* Configure INTR based on link status change. */
10058 bnx2x_cl22_write(bp, phy,
10059 MDIO_REG_INTR_MASK,
10060 ~MDIO_REG_INTR_MASK_LINK_STATUS);
10061
10062 /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
10063 bnx2x_cl22_write(bp, phy,
10064 MDIO_REG_GPHY_SHADOW,
10065 MDIO_REG_GPHY_SHADOW_AUTO_DET_MED);
10066 bnx2x_cl22_read(bp, phy,
10067 MDIO_REG_GPHY_SHADOW,
10068 &temp);
10069 temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD;
10070 bnx2x_cl22_write(bp, phy,
10071 MDIO_REG_GPHY_SHADOW,
10072 MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
10073
10074 /* Set up fc */
10075 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
10076 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
10077 fc_val = 0;
10078 if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
10079 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC)
10080 fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
10081
10082 if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
10083 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
10084 fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
10085
10086 /* read all advertisement */
10087 bnx2x_cl22_read(bp, phy,
10088 0x09,
10089 &an_1000_val);
10090
10091 bnx2x_cl22_read(bp, phy,
10092 0x04,
10093 &an_10_100_val);
10094
10095 bnx2x_cl22_read(bp, phy,
10096 MDIO_PMA_REG_CTRL,
10097 &autoneg_val);
10098
10099 /* Disable forced speed */
10100 autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
10101 an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) |
10102 (1<<11));
10103
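	/* MII register map used below (IEEE 802.3 clause 22): reg 0x00 is the
	 * control register (bits 6/13 = speed select MSB/LSB, bit 8 = duplex,
	 * bit 9 = restart autoneg, bit 12 = autoneg enable); reg 0x04 holds
	 * the 10/100 advertisement and reg 0x09 the 1000BASE-T advertisement.
	 */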
10104 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10105 (phy->speed_cap_mask &
10106 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
10107 (phy->req_line_speed == SPEED_1000)) {
10108 an_1000_val |= (1<<8);
10109 autoneg_val |= (1<<9 | 1<<12);
10110 if (phy->req_duplex == DUPLEX_FULL)
10111 an_1000_val |= (1<<9);
10112 DP(NETIF_MSG_LINK, "Advertising 1G\n");
10113 } else
10114 an_1000_val &= ~((1<<8) | (1<<9));
10115
10116 bnx2x_cl22_write(bp, phy,
10117 0x09,
10118 an_1000_val);
10119 bnx2x_cl22_read(bp, phy,
10120 0x09,
10121 &an_1000_val);
10122
10123 /* set 100 speed advertisement */
10124 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10125 (phy->speed_cap_mask &
10126 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
10127 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
10128 an_10_100_val |= (1<<7);
10129 /* Enable autoneg and restart autoneg for legacy speeds */
10130 autoneg_val |= (1<<9 | 1<<12);
10131
10132 if (phy->req_duplex == DUPLEX_FULL)
10133 an_10_100_val |= (1<<8);
10134 DP(NETIF_MSG_LINK, "Advertising 100M\n");
10135 }
10136
10137 /* set 10 speed advertisement */
10138 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10139 (phy->speed_cap_mask &
10140 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
10141 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
10142 an_10_100_val |= (1<<5);
10143 autoneg_val |= (1<<9 | 1<<12);
10144 if (phy->req_duplex == DUPLEX_FULL)
10145 an_10_100_val |= (1<<6);
10146 DP(NETIF_MSG_LINK, "Advertising 10M\n");
10147 }
10148
10149 /* Only 10/100 are allowed to work in FORCE mode */
10150 if (phy->req_line_speed == SPEED_100) {
10151 autoneg_val |= (1<<13);
10152	/* Enable AUTO-MDIX when autoneg is disabled */
10153 bnx2x_cl22_write(bp, phy,
10154 0x18,
10155 (1<<15 | 1<<9 | 7<<0));
10156 DP(NETIF_MSG_LINK, "Setting 100M force\n");
10157 }
10158 if (phy->req_line_speed == SPEED_10) {
10159	/* Enable AUTO-MDIX when autoneg is disabled */
10160 bnx2x_cl22_write(bp, phy,
10161 0x18,
10162 (1<<15 | 1<<9 | 7<<0));
10163 DP(NETIF_MSG_LINK, "Setting 10M force\n");
10164 }
10165
10166 /* Check if we should turn on Auto-GrEEEn */
10167 bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp);
10168 if (temp == MDIO_REG_GPHY_ID_54618SE) {
10169 if (params->feature_config_flags &
10170 FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
10171 temp = 6;
10172 DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
10173 } else {
10174 temp = 0;
10175 DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n");
10176 }
10177 bnx2x_cl22_write(bp, phy,
10178 MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD);
10179 bnx2x_cl22_write(bp, phy,
10180 MDIO_REG_GPHY_CL45_DATA_REG,
10181 MDIO_REG_GPHY_EEE_ADV);
10182 bnx2x_cl22_write(bp, phy,
10183 MDIO_REG_GPHY_CL45_ADDR_REG,
10184 (0x1 << 14) | MDIO_AN_DEVAD);
10185 bnx2x_cl22_write(bp, phy,
10186 MDIO_REG_GPHY_CL45_DATA_REG,
10187 temp);
10188 }
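	/* The four CL22 writes above follow the standard clause-22 MMD access
	 * sequence: write the devad with function "address", write the target
	 * register number to the data register, switch the function to "data"
	 * (bit 14), then write the actual value.
	 */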
10189
10190 bnx2x_cl22_write(bp, phy,
10191 0x04,
10192 an_10_100_val | fc_val);
10193
10194 if (phy->req_duplex == DUPLEX_FULL)
10195 autoneg_val |= (1<<8);
10196
10197 bnx2x_cl22_write(bp, phy,
10198 MDIO_PMA_REG_CTRL, autoneg_val);
10199
10200 return 0;
10201}
10202
10203static void bnx2x_54618se_set_link_led(struct bnx2x_phy *phy,
10204 struct link_params *params, u8 mode)
10205{
10206 struct bnx2x *bp = params->bp;
10207 DP(NETIF_MSG_LINK, "54618SE set link led (mode=%x)\n", mode);
10208 switch (mode) {
10209 case LED_MODE_FRONT_PANEL_OFF:
10210 case LED_MODE_OFF:
10211 case LED_MODE_OPER:
10212 case LED_MODE_ON:
10213 default:
10214 break;
10215 }
10216 return;
10217}
10218
10219static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
10220 struct link_params *params)
10221{
10222 struct bnx2x *bp = params->bp;
10223 u32 cfg_pin;
10224 u8 port;
10225
10226 /*
10227 * In case of no EPIO routed to reset the GPHY, put it
10228 * in low power mode.
10229 */
10230 bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
10231 /*
10232 * This works with E3 only, no need to check the chip
10233 * before determining the port.
10234 */
10235 port = params->port;
10236 cfg_pin = (REG_RD(bp, params->shmem_base +
10237 offsetof(struct shmem_region,
10238 dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
10239 PORT_HW_CFG_E3_PHY_RESET_MASK) >>
10240 PORT_HW_CFG_E3_PHY_RESET_SHIFT;
10241
10242 /* Drive pin low to put GPHY in reset. */
10243 bnx2x_set_cfg_pin(bp, cfg_pin, 0);
10244}
10245
10246static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
10247 struct link_params *params,
10248 struct link_vars *vars)
10249{
10250 struct bnx2x *bp = params->bp;
10251 u16 val;
10252 u8 link_up = 0;
10253 u16 legacy_status, legacy_speed;
10254
10255 /* Get speed operation status */
10256 bnx2x_cl22_read(bp, phy,
10257 0x19,
10258 &legacy_status);
10259 DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
10260
10261 /* Read status to clear the PHY interrupt. */
10262 bnx2x_cl22_read(bp, phy,
10263 MDIO_REG_INTR_STATUS,
10264 &val);
10265
10266 link_up = ((legacy_status & (1<<2)) == (1<<2));
10267
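	/* Reg 0x19: bit 2 = link up, bits [10:8] = resolved speed/duplex,
	 * decoded below (the value 4, 100BASE-T4, is deliberately skipped).
	 */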
10268 if (link_up) {
10269 legacy_speed = (legacy_status & (7<<8));
10270 if (legacy_speed == (7<<8)) {
10271 vars->line_speed = SPEED_1000;
10272 vars->duplex = DUPLEX_FULL;
10273 } else if (legacy_speed == (6<<8)) {
10274 vars->line_speed = SPEED_1000;
10275 vars->duplex = DUPLEX_HALF;
10276 } else if (legacy_speed == (5<<8)) {
10277 vars->line_speed = SPEED_100;
10278 vars->duplex = DUPLEX_FULL;
10279 }
10280 /* Omitting 100Base-T4 for now */
10281 else if (legacy_speed == (3<<8)) {
10282 vars->line_speed = SPEED_100;
10283 vars->duplex = DUPLEX_HALF;
10284 } else if (legacy_speed == (2<<8)) {
10285 vars->line_speed = SPEED_10;
10286 vars->duplex = DUPLEX_FULL;
10287 } else if (legacy_speed == (1<<8)) {
10288 vars->line_speed = SPEED_10;
10289 vars->duplex = DUPLEX_HALF;
10290 } else /* Should not happen */
10291 vars->line_speed = 0;
10292
10293 DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
10294 " is_duplex_full= %d\n", vars->line_speed,
10295 (vars->duplex == DUPLEX_FULL));
10296
10297 /* Check legacy speed AN resolution */
10298 bnx2x_cl22_read(bp, phy,
10299 0x01,
10300 &val);
10301 if (val & (1<<5))
10302 vars->link_status |=
10303 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
10304 bnx2x_cl22_read(bp, phy,
10305 0x06,
10306 &val);
10307 if ((val & (1<<0)) == 0)
10308 vars->link_status |=
10309 LINK_STATUS_PARALLEL_DETECTION_USED;
10310
10311 DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
10312 vars->line_speed);
10313
10314 /* Report whether EEE is resolved. */
10315 bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val);
10316 if (val == MDIO_REG_GPHY_ID_54618SE) {
10317 if (vars->link_status &
10318 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
10319 val = 0;
10320 else {
10321 bnx2x_cl22_write(bp, phy,
10322 MDIO_REG_GPHY_CL45_ADDR_REG,
10323 MDIO_AN_DEVAD);
10324 bnx2x_cl22_write(bp, phy,
10325 MDIO_REG_GPHY_CL45_DATA_REG,
10326 MDIO_REG_GPHY_EEE_RESOLVED);
10327 bnx2x_cl22_write(bp, phy,
10328 MDIO_REG_GPHY_CL45_ADDR_REG,
10329 (0x1 << 14) | MDIO_AN_DEVAD);
10330 bnx2x_cl22_read(bp, phy,
10331 MDIO_REG_GPHY_CL45_DATA_REG,
10332 &val);
10333 }
10334 DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val);
10335 }
10336
10337 bnx2x_ext_phy_resolve_fc(phy, params, vars);
10338 }
10339 return link_up;
10340}
10341
10342static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy,
10343 struct link_params *params)
10344{
10345 struct bnx2x *bp = params->bp;
10346 u16 val;
10347 u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10348
10349 DP(NETIF_MSG_LINK, "2PMA/PMD ext_phy_loopback: 54618se\n");
10350
10351	/* Enable master/slave manual mode and set to master */
10352 /* mii write 9 [bits set 11 12] */
10353 bnx2x_cl22_write(bp, phy, 0x09, 3<<11);
10354
10355 /* forced 1G and disable autoneg */
10356 /* set val [mii read 0] */
10357 /* set val [expr $val & [bits clear 6 12 13]] */
10358 /* set val [expr $val | [bits set 6 8]] */
10359 /* mii write 0 $val */
10360 bnx2x_cl22_read(bp, phy, 0x00, &val);
10361 val &= ~((1<<6) | (1<<12) | (1<<13));
10362 val |= (1<<6) | (1<<8);
10363 bnx2x_cl22_write(bp, phy, 0x00, val);
10364
10365 /* Set external loopback and Tx using 6dB coding */
10366 /* mii write 0x18 7 */
10367 /* set val [mii read 0x18] */
10368 /* mii write 0x18 [expr $val | [bits set 10 15]] */
10369 bnx2x_cl22_write(bp, phy, 0x18, 7);
10370 bnx2x_cl22_read(bp, phy, 0x18, &val);
10371 bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15));
10372
10373 /* This register opens the gate for the UMAC despite its name */
10374 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
10375
10376 /*
10377 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
10378 * length used by the MAC receive logic to check frames.
10379 */
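	/* 0x2710 = 10000 decimal, i.e. a 10000-byte maximum frame */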
10380 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
10381}
10382
10383/******************************************************************/
10384/* SFX7101 PHY SECTION */
10385/******************************************************************/
10386static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
10387 struct link_params *params)
10388{
10389 struct bnx2x *bp = params->bp;
10390 /* SFX7101_XGXS_TEST1 */
10391 bnx2x_cl45_write(bp, phy,
10392 MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
10393}
10394
10395static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
10396 struct link_params *params,
10397 struct link_vars *vars)
10398{
10399 u16 fw_ver1, fw_ver2, val;
10400 struct bnx2x *bp = params->bp;
10401 DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n");
10402
10403	/* Restore normal power mode */
10404 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
10405 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
10406 /* HW reset */
10407 bnx2x_ext_phy_hw_reset(bp, params->port);
10408 bnx2x_wait_reset_complete(bp, phy, params);
10409
10410 bnx2x_cl45_write(bp, phy,
10411 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
10412 DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
10413 bnx2x_cl45_write(bp, phy,
10414 MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
10415
10416 bnx2x_ext_phy_set_pause(params, phy, vars);
10417 /* Restart autoneg */
10418 bnx2x_cl45_read(bp, phy,
10419 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
10420 val |= 0x200;
10421 bnx2x_cl45_write(bp, phy,
10422 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
10423
10424 /* Save spirom version */
10425 bnx2x_cl45_read(bp, phy,
10426 MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1);
10427
10428 bnx2x_cl45_read(bp, phy,
10429 MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
10430 bnx2x_save_spirom_version(bp, params->port,
10431 (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
10432 return 0;
10433}
10434
10435static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
10436 struct link_params *params,
10437 struct link_vars *vars)
10438{
10439 struct bnx2x *bp = params->bp;
10440 u8 link_up;
10441 u16 val1, val2;
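	/* Each status register is read twice, so the debug prints below show
	 * both the previous (latched) value and the current one.
	 */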
10442 bnx2x_cl45_read(bp, phy,
10443 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
10444 bnx2x_cl45_read(bp, phy,
10445 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
10446 DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
10447 val2, val1);
10448 bnx2x_cl45_read(bp, phy,
10449 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
10450 bnx2x_cl45_read(bp, phy,
10451 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
10452 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
10453 val2, val1);
10454 link_up = ((val1 & 4) == 4);
10455 /* if link is up print the AN outcome of the SFX7101 PHY */
10456 if (link_up) {
10457 bnx2x_cl45_read(bp, phy,
10458 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
10459 &val2);
10460 vars->line_speed = SPEED_10000;
10461 vars->duplex = DUPLEX_FULL;
10462 DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
10463 val2, (val2 & (1<<14)));
10464 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
10465 bnx2x_ext_phy_resolve_fc(phy, params, vars);
10466 }
10467 return link_up;
10468}
10469
10470static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
10471{
10472 if (*len < 5)
10473 return -EINVAL;
10474 str[0] = (spirom_ver & 0xFF);
10475 str[1] = (spirom_ver & 0xFF00) >> 8;
10476 str[2] = (spirom_ver & 0xFF0000) >> 16;
10477 str[3] = (spirom_ver & 0xFF000000) >> 24;
10478 str[4] = '\0';
10479 *len -= 5;
10480 return 0;
10481}
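/* For example, a (hypothetical) spirom_ver of 0x30312E31 yields the bytes
 * "1.10" (LSB first) plus a terminating NUL, and *len is reduced by the
 * 5 bytes consumed.
 */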
10482
10483void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
10484{
10485 u16 val, cnt;
10486
10487 bnx2x_cl45_read(bp, phy,
10488 MDIO_PMA_DEVAD,
10489 MDIO_PMA_REG_7101_RESET, &val);
10490
10491 for (cnt = 0; cnt < 10; cnt++) {
10492 msleep(50);
10493 /* Writes a self-clearing reset */
10494 bnx2x_cl45_write(bp, phy,
10495 MDIO_PMA_DEVAD,
10496 MDIO_PMA_REG_7101_RESET,
10497 (val | (1<<15)));
10498 /* Wait for clear */
10499 bnx2x_cl45_read(bp, phy,
10500 MDIO_PMA_DEVAD,
10501 MDIO_PMA_REG_7101_RESET, &val);
10502
10503 if ((val & (1<<15)) == 0)
10504 break;
10505 }
10506}
10507
10508static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
10509 struct link_params *params) {
10510 /* Low power mode is controlled by GPIO 2 */
10511 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
10512 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
10513 /* The PHY reset is controlled by GPIO 1 */
10514 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
10515 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
10516}
10517
10518static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
10519 struct link_params *params, u8 mode)
10520{
10521 u16 val = 0;
10522 struct bnx2x *bp = params->bp;
10523 switch (mode) {
10524 case LED_MODE_FRONT_PANEL_OFF:
10525 case LED_MODE_OFF:
10526 val = 2;
10527 break;
10528 case LED_MODE_ON:
10529 val = 1;
10530 break;
10531 case LED_MODE_OPER:
10532 val = 0;
10533 break;
10534 }
10535 bnx2x_cl45_write(bp, phy,
10536 MDIO_PMA_DEVAD,
10537 MDIO_PMA_REG_7107_LINK_LED_CNTL,
10538 val);
10539}
10540
10541/******************************************************************/
10542/* STATIC PHY DECLARATION */
10543/******************************************************************/
10544
10545static struct bnx2x_phy phy_null = {
10546 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
10547 .addr = 0,
10548 .def_md_devad = 0,
10549 .flags = FLAGS_INIT_XGXS_FIRST,
10550 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10551 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10552 .mdio_ctrl = 0,
10553 .supported = 0,
10554 .media_type = ETH_PHY_NOT_PRESENT,
10555 .ver_addr = 0,
10556 .req_flow_ctrl = 0,
10557 .req_line_speed = 0,
10558 .speed_cap_mask = 0,
10559 .req_duplex = 0,
10560 .rsrv = 0,
10561 .config_init = (config_init_t)NULL,
10562 .read_status = (read_status_t)NULL,
10563 .link_reset = (link_reset_t)NULL,
10564 .config_loopback = (config_loopback_t)NULL,
10565 .format_fw_ver = (format_fw_ver_t)NULL,
10566 .hw_reset = (hw_reset_t)NULL,
10567 .set_link_led = (set_link_led_t)NULL,
10568 .phy_specific_func = (phy_specific_func_t)NULL
10569};
10570
10571static struct bnx2x_phy phy_serdes = {
10572 .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
10573 .addr = 0xff,
10574 .def_md_devad = 0,
10575 .flags = 0,
10576 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10577 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10578 .mdio_ctrl = 0,
10579 .supported = (SUPPORTED_10baseT_Half |
10580 SUPPORTED_10baseT_Full |
10581 SUPPORTED_100baseT_Half |
10582 SUPPORTED_100baseT_Full |
10583 SUPPORTED_1000baseT_Full |
10584 SUPPORTED_2500baseX_Full |
10585 SUPPORTED_TP |
10586 SUPPORTED_Autoneg |
10587 SUPPORTED_Pause |
10588 SUPPORTED_Asym_Pause),
10589 .media_type = ETH_PHY_BASE_T,
10590 .ver_addr = 0,
10591 .req_flow_ctrl = 0,
10592 .req_line_speed = 0,
10593 .speed_cap_mask = 0,
10594 .req_duplex = 0,
10595 .rsrv = 0,
10596 .config_init = (config_init_t)bnx2x_xgxs_config_init,
10597 .read_status = (read_status_t)bnx2x_link_settings_status,
10598 .link_reset = (link_reset_t)bnx2x_int_link_reset,
10599 .config_loopback = (config_loopback_t)NULL,
10600 .format_fw_ver = (format_fw_ver_t)NULL,
10601 .hw_reset = (hw_reset_t)NULL,
10602 .set_link_led = (set_link_led_t)NULL,
10603 .phy_specific_func = (phy_specific_func_t)NULL
10604};
10605
10606static struct bnx2x_phy phy_xgxs = {
10607 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
10608 .addr = 0xff,
10609 .def_md_devad = 0,
10610 .flags = 0,
10611 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10612 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10613 .mdio_ctrl = 0,
10614 .supported = (SUPPORTED_10baseT_Half |
10615 SUPPORTED_10baseT_Full |
10616 SUPPORTED_100baseT_Half |
10617 SUPPORTED_100baseT_Full |
10618 SUPPORTED_1000baseT_Full |
10619 SUPPORTED_2500baseX_Full |
10620 SUPPORTED_10000baseT_Full |
10621 SUPPORTED_FIBRE |
10622 SUPPORTED_Autoneg |
10623 SUPPORTED_Pause |
10624 SUPPORTED_Asym_Pause),
10625 .media_type = ETH_PHY_CX4,
10626 .ver_addr = 0,
10627 .req_flow_ctrl = 0,
10628 .req_line_speed = 0,
10629 .speed_cap_mask = 0,
10630 .req_duplex = 0,
10631 .rsrv = 0,
10632 .config_init = (config_init_t)bnx2x_xgxs_config_init,
10633 .read_status = (read_status_t)bnx2x_link_settings_status,
10634 .link_reset = (link_reset_t)bnx2x_int_link_reset,
10635 .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
10636 .format_fw_ver = (format_fw_ver_t)NULL,
10637 .hw_reset = (hw_reset_t)NULL,
10638 .set_link_led = (set_link_led_t)NULL,
10639 .phy_specific_func = (phy_specific_func_t)NULL
10640};
10641static struct bnx2x_phy phy_warpcore = {
10642 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
10643 .addr = 0xff,
10644 .def_md_devad = 0,
10645 .flags = FLAGS_HW_LOCK_REQUIRED,
10646 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10647 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10648 .mdio_ctrl = 0,
10649 .supported = (SUPPORTED_10baseT_Half |
10650 SUPPORTED_10baseT_Full |
10651 SUPPORTED_100baseT_Half |
10652 SUPPORTED_100baseT_Full |
10653 SUPPORTED_1000baseT_Full |
10654 SUPPORTED_10000baseT_Full |
10655 SUPPORTED_20000baseKR2_Full |
10656 SUPPORTED_20000baseMLD2_Full |
10657 SUPPORTED_FIBRE |
10658 SUPPORTED_Autoneg |
10659 SUPPORTED_Pause |
10660 SUPPORTED_Asym_Pause),
10661 .media_type = ETH_PHY_UNSPECIFIED,
10662 .ver_addr = 0,
10663 .req_flow_ctrl = 0,
10664 .req_line_speed = 0,
10665 .speed_cap_mask = 0,
10666	.req_duplex = 0,
10667	.rsrv = 0,
10668 .config_init = (config_init_t)bnx2x_warpcore_config_init,
10669 .read_status = (read_status_t)bnx2x_warpcore_read_status,
10670 .link_reset = (link_reset_t)bnx2x_warpcore_link_reset,
10671 .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback,
10672 .format_fw_ver = (format_fw_ver_t)NULL,
10673 .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset,
10674 .set_link_led = (set_link_led_t)NULL,
10675 .phy_specific_func = (phy_specific_func_t)NULL
10676};
10677
10678
10679static struct bnx2x_phy phy_7101 = {
10680 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
10681 .addr = 0xff,
10682 .def_md_devad = 0,
10683 .flags = FLAGS_FAN_FAILURE_DET_REQ,
10684 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10685 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10686 .mdio_ctrl = 0,
10687 .supported = (SUPPORTED_10000baseT_Full |
10688 SUPPORTED_TP |
10689 SUPPORTED_Autoneg |
10690 SUPPORTED_Pause |
10691 SUPPORTED_Asym_Pause),
10692 .media_type = ETH_PHY_BASE_T,
10693 .ver_addr = 0,
10694 .req_flow_ctrl = 0,
10695 .req_line_speed = 0,
10696 .speed_cap_mask = 0,
10697 .req_duplex = 0,
10698 .rsrv = 0,
10699 .config_init = (config_init_t)bnx2x_7101_config_init,
10700 .read_status = (read_status_t)bnx2x_7101_read_status,
10701 .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
10702 .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
10703 .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver,
10704 .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset,
10705 .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led,
10706 .phy_specific_func = (phy_specific_func_t)NULL
10707};
10708static struct bnx2x_phy phy_8073 = {
10709 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
10710 .addr = 0xff,
10711 .def_md_devad = 0,
10712 .flags = FLAGS_HW_LOCK_REQUIRED,
10713 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10714 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10715 .mdio_ctrl = 0,
10716 .supported = (SUPPORTED_10000baseT_Full |
10717 SUPPORTED_2500baseX_Full |
10718 SUPPORTED_1000baseT_Full |
10719 SUPPORTED_FIBRE |
10720 SUPPORTED_Autoneg |
10721 SUPPORTED_Pause |
10722 SUPPORTED_Asym_Pause),
10723 .media_type = ETH_PHY_KR,
10724 .ver_addr = 0,
10725 .req_flow_ctrl = 0,
10726 .req_line_speed = 0,
10727 .speed_cap_mask = 0,
10728 .req_duplex = 0,
10729 .rsrv = 0,
10730 .config_init = (config_init_t)bnx2x_8073_config_init,
10731 .read_status = (read_status_t)bnx2x_8073_read_status,
10732 .link_reset = (link_reset_t)bnx2x_8073_link_reset,
10733 .config_loopback = (config_loopback_t)NULL,
10734 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
10735 .hw_reset = (hw_reset_t)NULL,
10736 .set_link_led = (set_link_led_t)NULL,
10737 .phy_specific_func = (phy_specific_func_t)NULL
10738};
10739static struct bnx2x_phy phy_8705 = {
10740 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
10741 .addr = 0xff,
10742 .def_md_devad = 0,
10743 .flags = FLAGS_INIT_XGXS_FIRST,
10744 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10745 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10746 .mdio_ctrl = 0,
10747 .supported = (SUPPORTED_10000baseT_Full |
10748 SUPPORTED_FIBRE |
10749 SUPPORTED_Pause |
10750 SUPPORTED_Asym_Pause),
10751 .media_type = ETH_PHY_XFP_FIBER,
10752 .ver_addr = 0,
10753 .req_flow_ctrl = 0,
10754 .req_line_speed = 0,
10755 .speed_cap_mask = 0,
10756 .req_duplex = 0,
10757 .rsrv = 0,
10758 .config_init = (config_init_t)bnx2x_8705_config_init,
10759 .read_status = (read_status_t)bnx2x_8705_read_status,
10760 .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
10761 .config_loopback = (config_loopback_t)NULL,
10762 .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver,
10763 .hw_reset = (hw_reset_t)NULL,
10764 .set_link_led = (set_link_led_t)NULL,
10765 .phy_specific_func = (phy_specific_func_t)NULL
10766};
10767static struct bnx2x_phy phy_8706 = {
10768 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
10769 .addr = 0xff,
10770 .def_md_devad = 0,
10771 .flags = FLAGS_INIT_XGXS_FIRST,
10772 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10773 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10774 .mdio_ctrl = 0,
10775 .supported = (SUPPORTED_10000baseT_Full |
10776 SUPPORTED_1000baseT_Full |
10777 SUPPORTED_FIBRE |
10778 SUPPORTED_Pause |
10779 SUPPORTED_Asym_Pause),
10780 .media_type = ETH_PHY_SFP_FIBER,
10781 .ver_addr = 0,
10782 .req_flow_ctrl = 0,
10783 .req_line_speed = 0,
10784 .speed_cap_mask = 0,
10785 .req_duplex = 0,
10786 .rsrv = 0,
10787 .config_init = (config_init_t)bnx2x_8706_config_init,
10788 .read_status = (read_status_t)bnx2x_8706_read_status,
10789 .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
10790 .config_loopback = (config_loopback_t)NULL,
10791 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
10792 .hw_reset = (hw_reset_t)NULL,
10793 .set_link_led = (set_link_led_t)NULL,
10794 .phy_specific_func = (phy_specific_func_t)NULL
10795};
10796
10797static struct bnx2x_phy phy_8726 = {
10798 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
10799 .addr = 0xff,
10800 .def_md_devad = 0,
10801 .flags = (FLAGS_HW_LOCK_REQUIRED |
10802 FLAGS_INIT_XGXS_FIRST),
10803 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10804 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10805 .mdio_ctrl = 0,
10806 .supported = (SUPPORTED_10000baseT_Full |
10807 SUPPORTED_1000baseT_Full |
10808 SUPPORTED_Autoneg |
10809 SUPPORTED_FIBRE |
10810 SUPPORTED_Pause |
10811 SUPPORTED_Asym_Pause),
10812 .media_type = ETH_PHY_NOT_PRESENT,
10813 .ver_addr = 0,
10814 .req_flow_ctrl = 0,
10815 .req_line_speed = 0,
10816 .speed_cap_mask = 0,
10817 .req_duplex = 0,
10818 .rsrv = 0,
10819 .config_init = (config_init_t)bnx2x_8726_config_init,
10820 .read_status = (read_status_t)bnx2x_8726_read_status,
10821 .link_reset = (link_reset_t)bnx2x_8726_link_reset,
10822 .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
10823 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
10824 .hw_reset = (hw_reset_t)NULL,
10825 .set_link_led = (set_link_led_t)NULL,
10826 .phy_specific_func = (phy_specific_func_t)NULL
10827};
10828
10829static struct bnx2x_phy phy_8727 = {
10830 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
10831 .addr = 0xff,
10832 .def_md_devad = 0,
10833 .flags = FLAGS_FAN_FAILURE_DET_REQ,
10834 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10835 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10836 .mdio_ctrl = 0,
10837 .supported = (SUPPORTED_10000baseT_Full |
10838 SUPPORTED_1000baseT_Full |
10839 SUPPORTED_FIBRE |
10840 SUPPORTED_Pause |
10841 SUPPORTED_Asym_Pause),
10842 .media_type = ETH_PHY_NOT_PRESENT,
10843 .ver_addr = 0,
10844 .req_flow_ctrl = 0,
10845 .req_line_speed = 0,
10846 .speed_cap_mask = 0,
10847 .req_duplex = 0,
10848 .rsrv = 0,
10849 .config_init = (config_init_t)bnx2x_8727_config_init,
10850 .read_status = (read_status_t)bnx2x_8727_read_status,
10851 .link_reset = (link_reset_t)bnx2x_8727_link_reset,
10852 .config_loopback = (config_loopback_t)NULL,
10853 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
10854 .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset,
10855 .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led,
10856 .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
10857};
10858static struct bnx2x_phy phy_8481 = {
10859 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
10860 .addr = 0xff,
10861 .def_md_devad = 0,
10862 .flags = FLAGS_FAN_FAILURE_DET_REQ |
10863 FLAGS_REARM_LATCH_SIGNAL,
10864 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10865 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10866 .mdio_ctrl = 0,
10867 .supported = (SUPPORTED_10baseT_Half |
10868 SUPPORTED_10baseT_Full |
10869 SUPPORTED_100baseT_Half |
10870 SUPPORTED_100baseT_Full |
10871 SUPPORTED_1000baseT_Full |
10872 SUPPORTED_10000baseT_Full |
10873 SUPPORTED_TP |
10874 SUPPORTED_Autoneg |
10875 SUPPORTED_Pause |
10876 SUPPORTED_Asym_Pause),
10877 .media_type = ETH_PHY_BASE_T,
10878 .ver_addr = 0,
10879 .req_flow_ctrl = 0,
10880 .req_line_speed = 0,
10881 .speed_cap_mask = 0,
10882 .req_duplex = 0,
10883 .rsrv = 0,
10884 .config_init = (config_init_t)bnx2x_8481_config_init,
10885 .read_status = (read_status_t)bnx2x_848xx_read_status,
10886 .link_reset = (link_reset_t)bnx2x_8481_link_reset,
10887 .config_loopback = (config_loopback_t)NULL,
10888 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
10889 .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset,
10890 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
10891 .phy_specific_func = (phy_specific_func_t)NULL
10892};
10893
10894static struct bnx2x_phy phy_84823 = {
10895 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
10896 .addr = 0xff,
10897 .def_md_devad = 0,
10898 .flags = FLAGS_FAN_FAILURE_DET_REQ |
10899 FLAGS_REARM_LATCH_SIGNAL,
10900 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10901 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10902 .mdio_ctrl = 0,
10903 .supported = (SUPPORTED_10baseT_Half |
10904 SUPPORTED_10baseT_Full |
10905 SUPPORTED_100baseT_Half |
10906 SUPPORTED_100baseT_Full |
10907 SUPPORTED_1000baseT_Full |
10908 SUPPORTED_10000baseT_Full |
10909 SUPPORTED_TP |
10910 SUPPORTED_Autoneg |
10911 SUPPORTED_Pause |
10912 SUPPORTED_Asym_Pause),
10913 .media_type = ETH_PHY_BASE_T,
10914 .ver_addr = 0,
10915 .req_flow_ctrl = 0,
10916 .req_line_speed = 0,
10917 .speed_cap_mask = 0,
10918 .req_duplex = 0,
10919 .rsrv = 0,
10920 .config_init = (config_init_t)bnx2x_848x3_config_init,
10921 .read_status = (read_status_t)bnx2x_848xx_read_status,
10922 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
10923 .config_loopback = (config_loopback_t)NULL,
10924 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
10925 .hw_reset = (hw_reset_t)NULL,
10926 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
10927 .phy_specific_func = (phy_specific_func_t)NULL
10928};
10929
10930static struct bnx2x_phy phy_84833 = {
10931 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
10932 .addr = 0xff,
10933 .def_md_devad = 0,
10934 .flags = FLAGS_FAN_FAILURE_DET_REQ |
10935 FLAGS_REARM_LATCH_SIGNAL,
10936 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10937 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10938 .mdio_ctrl = 0,
10939 .supported = (SUPPORTED_100baseT_Half |
10940 SUPPORTED_100baseT_Full |
10941 SUPPORTED_1000baseT_Full |
10942 SUPPORTED_10000baseT_Full |
10943 SUPPORTED_TP |
10944 SUPPORTED_Autoneg |
10945 SUPPORTED_Pause |
10946 SUPPORTED_Asym_Pause),
10947 .media_type = ETH_PHY_BASE_T,
10948 .ver_addr = 0,
10949 .req_flow_ctrl = 0,
10950 .req_line_speed = 0,
10951 .speed_cap_mask = 0,
10952 .req_duplex = 0,
10953 .rsrv = 0,
10954 .config_init = (config_init_t)bnx2x_848x3_config_init,
10955 .read_status = (read_status_t)bnx2x_848xx_read_status,
10956 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
10957 .config_loopback = (config_loopback_t)NULL,
10958 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
10959 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
10960 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
10961 .phy_specific_func = (phy_specific_func_t)NULL
10962};
10963
10964static struct bnx2x_phy phy_54618se = {
10965 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
10966 .addr = 0xff,
10967 .def_md_devad = 0,
10968 .flags = FLAGS_INIT_XGXS_FIRST,
10969 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10970 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10971 .mdio_ctrl = 0,
10972 .supported = (SUPPORTED_10baseT_Half |
10973 SUPPORTED_10baseT_Full |
10974 SUPPORTED_100baseT_Half |
10975 SUPPORTED_100baseT_Full |
10976 SUPPORTED_1000baseT_Full |
10977 SUPPORTED_TP |
10978 SUPPORTED_Autoneg |
10979 SUPPORTED_Pause |
10980 SUPPORTED_Asym_Pause),
10981 .media_type = ETH_PHY_BASE_T,
10982 .ver_addr = 0,
10983 .req_flow_ctrl = 0,
10984 .req_line_speed = 0,
10985 .speed_cap_mask = 0,
10986	.req_duplex = 0,
10987	.rsrv = 0,
10988 .config_init = (config_init_t)bnx2x_54618se_config_init,
10989 .read_status = (read_status_t)bnx2x_54618se_read_status,
10990 .link_reset = (link_reset_t)bnx2x_54618se_link_reset,
10991 .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
10992 .format_fw_ver = (format_fw_ver_t)NULL,
10993 .hw_reset = (hw_reset_t)NULL,
10994 .set_link_led = (set_link_led_t)bnx2x_54618se_set_link_led,
10995 .phy_specific_func = (phy_specific_func_t)NULL
10996};
10997/*****************************************************************/
10998/* */
10999/* Populate the phy according to the shmem. Main: bnx2x_populate_phy */
11000/* */
11001/*****************************************************************/
11002
11003static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
11004 struct bnx2x_phy *phy, u8 port,
11005 u8 phy_index)
11006{
11007	/* Get the xgxs rx and tx config for all 4 lanes */
11008 u32 rx = 0, tx = 0, i;
11009 for (i = 0; i < 2; i++) {
11010 /*
11011 * INT_PHY and EXT_PHY1 share the same value location in the
11012	 * shmem. When num_phys is greater than 1, then this value
11013 * applies only to EXT_PHY1
11014 */
11015 if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
11016 rx = REG_RD(bp, shmem_base +
11017 offsetof(struct shmem_region,
11018 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
11019
11020 tx = REG_RD(bp, shmem_base +
11021 offsetof(struct shmem_region,
11022 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
11023 } else {
11024 rx = REG_RD(bp, shmem_base +
11025 offsetof(struct shmem_region,
11026 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
11027
11028 tx = REG_RD(bp, shmem_base +
11029 offsetof(struct shmem_region,
11030	dev_info.port_hw_config[port].xgxs_config2_tx[i<<1]));
11031 }
11032
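		/* Each 32-bit shmem word packs two 16-bit lane values, high
		 * half first, so the two loop iterations fill all four
		 * preemphasis entries.
		 */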
11033 phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
11034 phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
11035
11036 phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
11037 phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
11038 }
11039}
11040
11041static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
11042 u8 phy_index, u8 port)
11043{
11044 u32 ext_phy_config = 0;
11045 switch (phy_index) {
11046 case EXT_PHY1:
11047 ext_phy_config = REG_RD(bp, shmem_base +
11048 offsetof(struct shmem_region,
11049 dev_info.port_hw_config[port].external_phy_config));
11050 break;
11051 case EXT_PHY2:
11052 ext_phy_config = REG_RD(bp, shmem_base +
11053 offsetof(struct shmem_region,
11054 dev_info.port_hw_config[port].external_phy_config2));
11055 break;
11056 default:
11057 DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
11058 return -EINVAL;
11059 }
11060
11061 return ext_phy_config;
11062}
11063static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11064 struct bnx2x_phy *phy)
11065{
11066 u32 phy_addr;
11067 u32 chip_id;
11068 u32 switch_cfg = (REG_RD(bp, shmem_base +
11069 offsetof(struct shmem_region,
11070 dev_info.port_feature_config[port].link_config)) &
11071 PORT_FEATURE_CONNECTED_SWITCH_MASK);
11072 chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
11073	DP(NETIF_MSG_LINK, "chip_id = 0x%x\n", chip_id);
11074 if (USES_WARPCORE(bp)) {
11075 u32 serdes_net_if;
11076 phy_addr = REG_RD(bp,
11077 MISC_REG_WC0_CTRL_PHY_ADDR);
11078 *phy = phy_warpcore;
11079 if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3)
11080 phy->flags |= FLAGS_4_PORT_MODE;
11081 else
11082 phy->flags &= ~FLAGS_4_PORT_MODE;
11083 /* Check Dual mode */
11084 serdes_net_if = (REG_RD(bp, shmem_base +
11085 offsetof(struct shmem_region, dev_info.
11086 port_hw_config[port].default_cfg)) &
11087 PORT_HW_CFG_NET_SERDES_IF_MASK);
11088 /*
11089		 * Set the appropriate 'supported' speeds and flags according
11090		 * to the interface type of the chip
11091 */
11092 switch (serdes_net_if) {
11093 case PORT_HW_CFG_NET_SERDES_IF_SGMII:
11094 phy->supported &= (SUPPORTED_10baseT_Half |
11095 SUPPORTED_10baseT_Full |
11096 SUPPORTED_100baseT_Half |
11097 SUPPORTED_100baseT_Full |
11098 SUPPORTED_1000baseT_Full |
11099 SUPPORTED_FIBRE |
11100 SUPPORTED_Autoneg |
11101 SUPPORTED_Pause |
11102 SUPPORTED_Asym_Pause);
11103 phy->media_type = ETH_PHY_BASE_T;
11104 break;
11105 case PORT_HW_CFG_NET_SERDES_IF_XFI:
11106 phy->media_type = ETH_PHY_XFP_FIBER;
11107 break;
11108 case PORT_HW_CFG_NET_SERDES_IF_SFI:
11109 phy->supported &= (SUPPORTED_1000baseT_Full |
11110 SUPPORTED_10000baseT_Full |
11111 SUPPORTED_FIBRE |
11112 SUPPORTED_Pause |
11113 SUPPORTED_Asym_Pause);
11114 phy->media_type = ETH_PHY_SFP_FIBER;
11115 break;
11116 case PORT_HW_CFG_NET_SERDES_IF_KR:
11117 phy->media_type = ETH_PHY_KR;
11118 phy->supported &= (SUPPORTED_1000baseT_Full |
11119 SUPPORTED_10000baseT_Full |
11120 SUPPORTED_FIBRE |
11121 SUPPORTED_Autoneg |
11122 SUPPORTED_Pause |
11123 SUPPORTED_Asym_Pause);
11124 break;
11125 case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
11126 phy->media_type = ETH_PHY_KR;
11127 phy->flags |= FLAGS_WC_DUAL_MODE;
11128 phy->supported &= (SUPPORTED_20000baseMLD2_Full |
11129 SUPPORTED_FIBRE |
11130 SUPPORTED_Pause |
11131 SUPPORTED_Asym_Pause);
11132 break;
11133 case PORT_HW_CFG_NET_SERDES_IF_KR2:
11134 phy->media_type = ETH_PHY_KR;
11135 phy->flags |= FLAGS_WC_DUAL_MODE;
11136 phy->supported &= (SUPPORTED_20000baseKR2_Full |
11137 SUPPORTED_FIBRE |
11138 SUPPORTED_Pause |
11139 SUPPORTED_Asym_Pause);
11140 break;
11141 default:
11142 DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n",
11143 serdes_net_if);
11144 break;
11145 }
11146
11147 /*
11148		 * Enable the MDC/MDIO work-around for E3 A0, since the free
11149		 * running MDC was not set as expected. For B0 the ECO fix will
11150		 * be enabled, so there won't be an issue there
11151 */
11152 if (CHIP_REV(bp) == CHIP_REV_Ax)
11153 phy->flags |= FLAGS_MDC_MDIO_WA;
11154 else
11155 phy->flags |= FLAGS_MDC_MDIO_WA_B0;
11156 } else {
11157 switch (switch_cfg) {
11158 case SWITCH_CFG_1G:
11159 phy_addr = REG_RD(bp,
11160 NIG_REG_SERDES0_CTRL_PHY_ADDR +
11161 port * 0x10);
11162 *phy = phy_serdes;
11163 break;
11164 case SWITCH_CFG_10G:
11165 phy_addr = REG_RD(bp,
11166 NIG_REG_XGXS0_CTRL_PHY_ADDR +
11167 port * 0x18);
11168 *phy = phy_xgxs;
11169 break;
11170 default:
11171 DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
11172 return -EINVAL;
11173 }
11174 }
11175 phy->addr = (u8)phy_addr;
11176 phy->mdio_ctrl = bnx2x_get_emac_base(bp,
11177 SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
11178 port);
11179 if (CHIP_IS_E2(bp))
11180 phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR;
11181 else
11182 phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
11183
11184 DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
11185 port, phy->addr, phy->mdio_ctrl);
11186
11187 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY);
11188 return 0;
11189}
11190
11191static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11192 u8 phy_index,
11193 u32 shmem_base,
11194 u32 shmem2_base,
11195 u8 port,
11196 struct bnx2x_phy *phy)
11197{
11198 u32 ext_phy_config, phy_type, config2;
11199 u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
11200 ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base,
11201 phy_index, port);
11202 phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11203 /* Select the phy type */
11204 switch (phy_type) {
11205 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
11206 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED;
11207 *phy = phy_8073;
11208 break;
11209 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
11210 *phy = phy_8705;
11211 break;
11212 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
11213 *phy = phy_8706;
11214 break;
11215 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
11216 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
11217 *phy = phy_8726;
11218 break;
11219 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
11220 /* BCM8727_NOC => BCM8727 no over current */
11221 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
11222 *phy = phy_8727;
11223 phy->flags |= FLAGS_NOC;
11224 break;
11225 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
11226 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
11227 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
11228 *phy = phy_8727;
11229 break;
11230 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
11231 *phy = phy_8481;
11232 break;
11233 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
11234 *phy = phy_84823;
11235 break;
11236 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
11237 *phy = phy_84833;
11238 break;
11239 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
11240 *phy = phy_54618se;
11241 break;
11242 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
11243 *phy = phy_7101;
11244 break;
11245 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
11246 *phy = phy_null;
11247 return -EINVAL;
11248 default:
11249 *phy = phy_null;
11250 return 0;
11251 }
11252
11253 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
11254 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
11255
11256 /*
11257	 * The shmem address of the phy version is located in different
11258	 * structures. If this structure is too old, do not set
11259 * the address
11260 */
11261 config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
11262 dev_info.shared_hw_config.config2));
11263 if (phy_index == EXT_PHY1) {
11264 phy->ver_addr = shmem_base + offsetof(struct shmem_region,
11265 port_mb[port].ext_phy_fw_version);
11266
11267 /* Check specific mdc mdio settings */
11268 if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
11269 mdc_mdio_access = config2 &
11270 SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
11271 } else {
11272 u32 size = REG_RD(bp, shmem2_base);
11273
11274 if (size >
11275 offsetof(struct shmem2_region, ext_phy_fw_version2)) {
11276 phy->ver_addr = shmem2_base +
11277 offsetof(struct shmem2_region,
11278 ext_phy_fw_version2[port]);
11279 }
11280 /* Check specific mdc mdio settings */
11281 if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
11282 mdc_mdio_access = (config2 &
11283 SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
11284 (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
11285 SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
11286 }
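	/* The ACCESS2 field was shifted down into the ACCESS1 bit positions
	 * above, so bnx2x_get_emac_base() can decode either port's setting
	 * against the same set of values.
	 */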
11287 phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
11288
11289 /*
11290 * In case mdc/mdio_access of the external phy is different than the
11291 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
11292	 * to prevent one port from interfering with another port's CL45 operations.
11293 */
11294 if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
11295 phy->flags |= FLAGS_HW_LOCK_REQUIRED;
11296 DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
11297 phy_type, port, phy_index);
11298 DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n",
11299 phy->addr, phy->mdio_ctrl);
11300 return 0;
11301}
11302
11303static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
11304 u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
11305{
11306 int status = 0;
11307 phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
11308 if (phy_index == INT_PHY)
11309 return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
11310 status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
11311 port, phy);
11312 return status;
11313}
11314
11315static void bnx2x_phy_def_cfg(struct link_params *params,
11316 struct bnx2x_phy *phy,
11317 u8 phy_index)
11318{
11319 struct bnx2x *bp = params->bp;
11320 u32 link_config;
11321 /* Populate the default phy configuration for MF mode */
11322 if (phy_index == EXT_PHY2) {
11323 link_config = REG_RD(bp, params->shmem_base +
11324 offsetof(struct shmem_region, dev_info.
11325 port_feature_config[params->port].link_config2));
11326 phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
11327 offsetof(struct shmem_region,
11328 dev_info.
11329 port_hw_config[params->port].speed_capability_mask2));
11330 } else {
11331 link_config = REG_RD(bp, params->shmem_base +
11332 offsetof(struct shmem_region, dev_info.
11333 port_feature_config[params->port].link_config));
11334 phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
11335 offsetof(struct shmem_region,
11336 dev_info.
11337 port_hw_config[params->port].speed_capability_mask));
11338 }
11339 DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
11340 " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
11341
11342 phy->req_duplex = DUPLEX_FULL;
11343 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
11344 case PORT_FEATURE_LINK_SPEED_10M_HALF:
11345 phy->req_duplex = DUPLEX_HALF;
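		/* fall through - half duplex shares the 10M speed setting */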
11346 case PORT_FEATURE_LINK_SPEED_10M_FULL:
11347 phy->req_line_speed = SPEED_10;
11348 break;
11349 case PORT_FEATURE_LINK_SPEED_100M_HALF:
11350 phy->req_duplex = DUPLEX_HALF;
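		/* fall through - half duplex shares the 100M speed setting */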
11351 case PORT_FEATURE_LINK_SPEED_100M_FULL:
11352 phy->req_line_speed = SPEED_100;
11353 break;
11354 case PORT_FEATURE_LINK_SPEED_1G:
11355 phy->req_line_speed = SPEED_1000;
11356 break;
11357 case PORT_FEATURE_LINK_SPEED_2_5G:
11358 phy->req_line_speed = SPEED_2500;
11359 break;
11360 case PORT_FEATURE_LINK_SPEED_10G_CX4:
11361 phy->req_line_speed = SPEED_10000;
11362 break;
11363 default:
11364 phy->req_line_speed = SPEED_AUTO_NEG;
11365 break;
11366 }
11367
11368 switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
11369 case PORT_FEATURE_FLOW_CONTROL_AUTO:
11370 phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
11371 break;
11372 case PORT_FEATURE_FLOW_CONTROL_TX:
11373 phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
11374 break;
11375 case PORT_FEATURE_FLOW_CONTROL_RX:
11376 phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
11377 break;
11378 case PORT_FEATURE_FLOW_CONTROL_BOTH:
11379 phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
11380 break;
11381 default:
11382 phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
11383 break;
11384 }
11385}
11386
11387u32 bnx2x_phy_selection(struct link_params *params)
11388{
11389 u32 phy_config_swapped, prio_cfg;
11390 u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
11391
11392 phy_config_swapped = params->multi_phy_config &
11393 PORT_HW_CFG_PHY_SWAPPED_ENABLED;
11394
11395 prio_cfg = params->multi_phy_config &
11396 PORT_HW_CFG_PHY_SELECTION_MASK;
11397
11398 if (phy_config_swapped) {
11399 switch (prio_cfg) {
11400 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11401 return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
11402 break;
11403 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11404 return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
11405 break;
11406 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11407 return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
11408 break;
11409 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11410 return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
11411 break;
11412 }
11413 } else
11414 return_cfg = prio_cfg;
11415
11416 return return_cfg;
11417}
11418
11419
11420int bnx2x_phy_probe(struct link_params *params)
11421{
11422 u8 phy_index, actual_phy_idx, link_cfg_idx;
11423 u32 phy_config_swapped, sync_offset, media_types;
11424 struct bnx2x *bp = params->bp;
11425 struct bnx2x_phy *phy;
11426 params->num_phys = 0;
11427 DP(NETIF_MSG_LINK, "Begin phy probe\n");
11428 phy_config_swapped = params->multi_phy_config &
11429 PORT_HW_CFG_PHY_SWAPPED_ENABLED;
11430
11431 for (phy_index = INT_PHY; phy_index < MAX_PHYS;
11432 phy_index++) {
11433 link_cfg_idx = LINK_CONFIG_IDX(phy_index);
11434 actual_phy_idx = phy_index;
11435 if (phy_config_swapped) {
11436 if (phy_index == EXT_PHY1)
11437 actual_phy_idx = EXT_PHY2;
11438 else if (phy_index == EXT_PHY2)
11439 actual_phy_idx = EXT_PHY1;
11440 }
11441 DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x,"
11442 " actual_phy_idx %x\n", phy_config_swapped,
11443 phy_index, actual_phy_idx);
11444 phy = &params->phy[actual_phy_idx];
11445 if (bnx2x_populate_phy(bp, phy_index, params->shmem_base,
11446 params->shmem2_base, params->port,
11447 phy) != 0) {
11448 params->num_phys = 0;
11449 DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
11450 phy_index);
11451 for (phy_index = INT_PHY;
11452 phy_index < MAX_PHYS;
11453 phy_index++)
11454	params->phy[phy_index] = phy_null;
11455 return -EINVAL;
11456 }
11457 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
11458 break;
11459
11460 sync_offset = params->shmem_base +
11461 offsetof(struct shmem_region,
11462 dev_info.port_hw_config[params->port].media_type);
11463 media_types = REG_RD(bp, sync_offset);
11464
11465 /*
11466 * Update media type for non-PMF sync only for the first time
11467 * In case the media type changes afterwards, it will be updated
11468 * using the update_status function
11469 */
11470 if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
11471 (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
11472 actual_phy_idx))) == 0) {
11473 media_types |= ((phy->media_type &
11474 PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
11475 (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
11476 actual_phy_idx));
11477 }
11478 REG_WR(bp, sync_offset, media_types);
11479
11480 bnx2x_phy_def_cfg(params, phy, phy_index);
11481 params->num_phys++;
11482 }
11483
11484 DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
11485 return 0;
11486}
11487
11488void bnx2x_init_bmac_loopback(struct link_params *params,
11489 struct link_vars *vars)
11490{
11491 struct bnx2x *bp = params->bp;
11492 vars->link_up = 1;
11493 vars->line_speed = SPEED_10000;
11494 vars->duplex = DUPLEX_FULL;
11495 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
11496 vars->mac_type = MAC_TYPE_BMAC;
11497
11498 vars->phy_flags = PHY_XGXS_FLAG;
11499
11500 bnx2x_xgxs_deassert(params);
11501
11502 /* set bmac loopback */
11503 bnx2x_bmac_enable(params, vars, 1);
11504
11505 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
11506}
11507
11508void bnx2x_init_emac_loopback(struct link_params *params,
11509 struct link_vars *vars)
11510{
11511 struct bnx2x *bp = params->bp;
11512 vars->link_up = 1;
11513 vars->line_speed = SPEED_1000;
11514 vars->duplex = DUPLEX_FULL;
11515 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
11516 vars->mac_type = MAC_TYPE_EMAC;
11517
11518 vars->phy_flags = PHY_XGXS_FLAG;
11519
11520 bnx2x_xgxs_deassert(params);
11521	/* set emac loopback */
11522 bnx2x_emac_enable(params, vars, 1);
11523 bnx2x_emac_program(params, vars);
11524 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
11525}
11526
11527void bnx2x_init_xmac_loopback(struct link_params *params,
11528 struct link_vars *vars)
11529{
11530 struct bnx2x *bp = params->bp;
11531 vars->link_up = 1;
11532 if (!params->req_line_speed[0])
11533 vars->line_speed = SPEED_10000;
11534 else
11535 vars->line_speed = params->req_line_speed[0];
11536 vars->duplex = DUPLEX_FULL;
11537 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
11538 vars->mac_type = MAC_TYPE_XMAC;
11539 vars->phy_flags = PHY_XGXS_FLAG;
11540 /*
11541	 * Set WC to loopback mode, since a link is required to provide the
11542	 * clock to the XMAC in 20G mode
11543 */
11544 bnx2x_set_aer_mmd(params, &params->phy[0]);
11545 bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
11546 params->phy[INT_PHY].config_loopback(
11547 &params->phy[INT_PHY],
11548 params);
11549
11550 bnx2x_xmac_enable(params, vars, 1);
11551 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
11552}
11553
11554void bnx2x_init_umac_loopback(struct link_params *params,
11555 struct link_vars *vars)
11556{
11557 struct bnx2x *bp = params->bp;
11558 vars->link_up = 1;
11559 vars->line_speed = SPEED_1000;
11560 vars->duplex = DUPLEX_FULL;
11561 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
11562 vars->mac_type = MAC_TYPE_UMAC;
11563 vars->phy_flags = PHY_XGXS_FLAG;
11564 bnx2x_umac_enable(params, vars, 1);
11565
11566 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
11567}
11568
11569void bnx2x_init_xgxs_loopback(struct link_params *params,
11570 struct link_vars *vars)
11571{
11572 struct bnx2x *bp = params->bp;
11573 vars->link_up = 1;
11574 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
11575 vars->duplex = DUPLEX_FULL;
11576 if (params->req_line_speed[0] == SPEED_1000)
11577 vars->line_speed = SPEED_1000;
11578 else
11579 vars->line_speed = SPEED_10000;
11580
11581 if (!USES_WARPCORE(bp))
11582 bnx2x_xgxs_deassert(params);
11583 bnx2x_link_initialize(params, vars);
11584
11585 if (params->req_line_speed[0] == SPEED_1000) {
11586 if (USES_WARPCORE(bp))
11587 bnx2x_umac_enable(params, vars, 0);
11588 else {
11589 bnx2x_emac_program(params, vars);
11590 bnx2x_emac_enable(params, vars, 0);
11591 }
11592 } else {
11593 if (USES_WARPCORE(bp))
11594 bnx2x_xmac_enable(params, vars, 0);
11595 else
11596 bnx2x_bmac_enable(params, vars, 0);
11597 }
11598
11599 if (params->loopback_mode == LOOPBACK_XGXS) {
11600 /* set 10G XGXS loopback */
11601 params->phy[INT_PHY].config_loopback(
11602 &params->phy[INT_PHY],
11603 params);
11604
11605 } else {
11606 /* set external phy loopback */
11607 u8 phy_index;
11608 for (phy_index = EXT_PHY1;
11609 phy_index < params->num_phys; phy_index++) {
11610 if (params->phy[phy_index].config_loopback)
11611 params->phy[phy_index].config_loopback(
11612 &params->phy[phy_index],
11613 params);
11614 }
11615 }
11616 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
11617
11618 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
11619}
11620
11621int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
11622{
11623 struct bnx2x *bp = params->bp;
11624 DP(NETIF_MSG_LINK, "Phy Initialization started\n");
11625 DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
11626 params->req_line_speed[0], params->req_flow_ctrl[0]);
11627 DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
11628 params->req_line_speed[1], params->req_flow_ctrl[1]);
11629 vars->link_status = 0;
11630 vars->phy_link_up = 0;
11631 vars->link_up = 0;
11632 vars->line_speed = 0;
11633 vars->duplex = DUPLEX_FULL;
11634 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
11635 vars->mac_type = MAC_TYPE_NONE;
11636 vars->phy_flags = 0;
11637
11638 /* disable attentions */
11639 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
11640 (NIG_MASK_XGXS0_LINK_STATUS |
11641 NIG_MASK_XGXS0_LINK10G |
11642 NIG_MASK_SERDES0_LINK_STATUS |
11643 NIG_MASK_MI_INT));
11644
11645 bnx2x_emac_init(params, vars);
11646
11647 if (params->num_phys == 0) {
11648 DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
11649 return -EINVAL;
11650 }
11651 set_phy_vars(params, vars);
11652
11653 DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
11654 switch (params->loopback_mode) {
11655 case LOOPBACK_BMAC:
11656 bnx2x_init_bmac_loopback(params, vars);
11657 break;
11658 case LOOPBACK_EMAC:
11659 bnx2x_init_emac_loopback(params, vars);
11660 break;
11661 case LOOPBACK_XMAC:
11662 bnx2x_init_xmac_loopback(params, vars);
11663 break;
11664 case LOOPBACK_UMAC:
11665 bnx2x_init_umac_loopback(params, vars);
11666 break;
11667 case LOOPBACK_XGXS:
11668 case LOOPBACK_EXT_PHY:
11669 bnx2x_init_xgxs_loopback(params, vars);
11670 break;
11671 default:
11672 if (!CHIP_IS_E3(bp)) {
11673 if (params->switch_cfg == SWITCH_CFG_10G)
11674 bnx2x_xgxs_deassert(params);
11675 else
11676 bnx2x_serdes_deassert(bp, params->port);
11677 }
11678 bnx2x_link_initialize(params, vars);
11679 msleep(30);
11680 bnx2x_link_int_enable(params);
11681 break;
11682 }
11683 return 0;
11684}
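/* Rough sketch of the expected call order (the actual call sites live
 * elsewhere in the driver; the flow shown here is illustrative only):
 *
 *	bnx2x_phy_probe(&bp->link_params);	// fill phy[] / num_phys
 *	bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 *	...
 *	bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
 */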
11685
11686int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
11687 u8 reset_ext_phy)
11688{
11689 struct bnx2x *bp = params->bp;
11690 u8 phy_index, port = params->port, clear_latch_ind = 0;
11691 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
11692 /* disable attentions */
11693 vars->link_status = 0;
11694 bnx2x_update_mng(params, vars->link_status);
11695 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
11696 (NIG_MASK_XGXS0_LINK_STATUS |
11697 NIG_MASK_XGXS0_LINK10G |
11698 NIG_MASK_SERDES0_LINK_STATUS |
11699 NIG_MASK_MI_INT));
11700
11701 /* activate nig drain */
11702 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
11703
11704 /* disable nig egress interface */
11705 if (!CHIP_IS_E3(bp)) {
11706 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
11707 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
11708 }
11709
11710 /* Stop BigMac rx */
11711 if (!CHIP_IS_E3(bp))
11712 bnx2x_bmac_rx_disable(bp, port);
11713 else
11714 bnx2x_xmac_disable(params);
11715 /* disable emac */
11716 if (!CHIP_IS_E3(bp))
11717 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
11718
11719 msleep(10);
11720	/* The PHY reset is controlled by GPIO 1.
11721	 * Hold it low for now.
11722	 */
11723 /* clear link led */
11724 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
11725
11726 if (reset_ext_phy) {
11727 bnx2x_set_mdio_clk(bp, params->chip_id, port);
11728 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
11729 phy_index++) {
11730 if (params->phy[phy_index].link_reset) {
11731 bnx2x_set_aer_mmd(params,
11732 &params->phy[phy_index]);
11733 params->phy[phy_index].link_reset(
11734 &params->phy[phy_index],
11735 params);
11736 }
11737 if (params->phy[phy_index].flags &
11738 FLAGS_REARM_LATCH_SIGNAL)
11739 clear_latch_ind = 1;
11740 }
11741 }
11742
11743 if (clear_latch_ind) {
11744 /* Clear latching indication */
11745 bnx2x_rearm_latch_signal(bp, port, 0);
11746 bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
11747 1 << NIG_LATCH_BC_ENABLE_MI_INT);
11748 }
11749 if (params->phy[INT_PHY].link_reset)
11750 params->phy[INT_PHY].link_reset(
11751 &params->phy[INT_PHY], params);
11752 /* reset BigMac */
11753 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
11754 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
11755
11756 /* disable nig ingress interface */
11757 if (!CHIP_IS_E3(bp)) {
11758 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
11759 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
11760 }
11761 vars->link_up = 0;
11762 vars->phy_flags = 0;
11763 return 0;
11764}
11765
11766/****************************************************************************/
11767/* Common function */
11768/****************************************************************************/
11769static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
11770 u32 shmem_base_path[],
11771 u32 shmem2_base_path[], u8 phy_index,
11772 u32 chip_id)
11773{
11774 struct bnx2x_phy phy[PORT_MAX];
11775 struct bnx2x_phy *phy_blk[PORT_MAX];
11776 u16 val;
11777 s8 port = 0;
11778 s8 port_of_path = 0;
11779 u32 swap_val, swap_override;
11780 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
11781 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
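	/* When the port-swap strap is set and overridden, the first physical
	 * port is port 1, so the port driven for the external reset below
	 * is flipped.
	 */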
11782 port ^= (swap_val && swap_override);
11783 bnx2x_ext_phy_hw_reset(bp, port);
11784 /* PART1 - Reset both phys */
11785 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
11786 u32 shmem_base, shmem2_base;
11787		/* In E2, the same phy is used for port0 of the two paths */
11788 if (CHIP_IS_E1x(bp)) {
11789 shmem_base = shmem_base_path[0];
11790 shmem2_base = shmem2_base_path[0];
11791 port_of_path = port;
11792 } else {
11793 shmem_base = shmem_base_path[port];
11794 shmem2_base = shmem2_base_path[port];
11795 port_of_path = 0;
11796 }
11797
11798 /* Extract the ext phy address for the port */
11799 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
11800 port_of_path, &phy[port]) !=
11801 0) {
11802 DP(NETIF_MSG_LINK, "populate_phy failed\n");
11803 return -EINVAL;
11804 }
11805 /* disable attentions */
11806 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
11807 port_of_path*4,
11808 (NIG_MASK_XGXS0_LINK_STATUS |
11809 NIG_MASK_XGXS0_LINK10G |
11810 NIG_MASK_SERDES0_LINK_STATUS |
11811 NIG_MASK_MI_INT));
11812
11813		/* Need to take the phy out of low power mode in order
11814		 * to access its registers */
11815 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
11816 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
11817 port);
11818
11819 /* Reset the phy */
11820 bnx2x_cl45_write(bp, &phy[port],
11821 MDIO_PMA_DEVAD,
11822 MDIO_PMA_REG_CTRL,
11823 1<<15);
11824 }
11825
11826 /* Add delay of 150ms after reset */
11827 msleep(150);
11828
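	/* Order the per-port phy pointers by MDIO address parity: an odd
	 * address on the PORT_0 phy means the phys are swapped with
	 * respect to the ports.
	 */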
11829 if (phy[PORT_0].addr & 0x1) {
11830 phy_blk[PORT_0] = &(phy[PORT_1]);
11831 phy_blk[PORT_1] = &(phy[PORT_0]);
11832 } else {
11833 phy_blk[PORT_0] = &(phy[PORT_0]);
11834 phy_blk[PORT_1] = &(phy[PORT_1]);
11835 }
11836
11837 /* PART2 - Download firmware to both phys */
11838 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
11839 if (CHIP_IS_E1x(bp))
11840 port_of_path = port;
11841 else
11842 port_of_path = 0;
11843
11844 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
11845 phy_blk[port]->addr);
11846 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
11847 port_of_path))
11848 return -EINVAL;
11849
11850 /* Only set bit 10 = 1 (Tx power down) */
11851 bnx2x_cl45_read(bp, phy_blk[port],
11852 MDIO_PMA_DEVAD,
11853 MDIO_PMA_REG_TX_POWER_DOWN, &val);
11854
11855 /* Phase1 of TX_POWER_DOWN reset */
11856 bnx2x_cl45_write(bp, phy_blk[port],
11857 MDIO_PMA_DEVAD,
11858 MDIO_PMA_REG_TX_POWER_DOWN,
11859 (val | 1<<10));
11860 }
11861
11862 /*
11863 * Toggle Transmitter: Power down and then up with 600ms delay
11864 * between
11865 */
11866 msleep(600);
11867
11868 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
11869 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
11870 /* Phase2 of POWER_DOWN_RESET */
11871 /* Release bit 10 (Release Tx power down) */
11872 bnx2x_cl45_read(bp, phy_blk[port],
11873 MDIO_PMA_DEVAD,
11874 MDIO_PMA_REG_TX_POWER_DOWN, &val);
11875
11876 bnx2x_cl45_write(bp, phy_blk[port],
11877 MDIO_PMA_DEVAD,
11878 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
11879 msleep(15);
11880
11881		/* Read-modify-write the SPI-ROM version select register */
11882 bnx2x_cl45_read(bp, phy_blk[port],
11883 MDIO_PMA_DEVAD,
11884 MDIO_PMA_REG_EDC_FFE_MAIN, &val);
11885 bnx2x_cl45_write(bp, phy_blk[port],
11886 MDIO_PMA_DEVAD,
11887 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
11888
11889 /* set GPIO2 back to LOW */
11890 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
11891 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
11892 }
11893 return 0;
11894}
11895static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
11896 u32 shmem_base_path[],
11897 u32 shmem2_base_path[], u8 phy_index,
11898 u32 chip_id)
11899{
11900 u32 val;
11901 s8 port;
11902 struct bnx2x_phy phy;
11903 /* Use port1 because of the static port-swap */
11904 /* Enable the module detection interrupt */
11905 val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
11906 val |= ((1<<MISC_REGISTERS_GPIO_3)|
11907 (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
11908 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
11909
11910 bnx2x_ext_phy_hw_reset(bp, 0);
11911 msleep(5);
11912 for (port = 0; port < PORT_MAX; port++) {
11913 u32 shmem_base, shmem2_base;
11914
11915		/* In E2, the same phy is used for port0 of both paths */
11916 if (CHIP_IS_E1x(bp)) {
11917 shmem_base = shmem_base_path[0];
11918 shmem2_base = shmem2_base_path[0];
11919 } else {
11920 shmem_base = shmem_base_path[port];
11921 shmem2_base = shmem2_base_path[port];
11922 }
11923 /* Extract the ext phy address for the port */
11924 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
11925 port, &phy) !=
11926 0) {
11927 DP(NETIF_MSG_LINK, "populate phy failed\n");
11928 return -EINVAL;
11929 }
11930
11931		/* Reset the phy */
11932 bnx2x_cl45_write(bp, &phy,
11933 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
11934
11935
11936 /* Set fault module detected LED on */
11937 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
11938 MISC_REGISTERS_GPIO_HIGH,
11939 port);
11940 }
11941
11942 return 0;
11943}
11944static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
11945 u8 *io_gpio, u8 *io_port)
11946{
11947
11948 u32 phy_gpio_reset = REG_RD(bp, shmem_base +
11949 offsetof(struct shmem_region,
11950 dev_info.port_hw_config[PORT_0].default_cfg));
11951 switch (phy_gpio_reset) {
11952 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
11953 *io_gpio = 0;
11954 *io_port = 0;
11955 break;
11956 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
11957 *io_gpio = 1;
11958 *io_port = 0;
11959 break;
11960 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
11961 *io_gpio = 2;
11962 *io_port = 0;
11963 break;
11964 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
11965 *io_gpio = 3;
11966 *io_port = 0;
11967 break;
11968 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
11969 *io_gpio = 0;
11970 *io_port = 1;
11971 break;
11972 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
11973 *io_gpio = 1;
11974 *io_port = 1;
11975 break;
11976 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
11977 *io_gpio = 2;
11978 *io_port = 1;
11979 break;
11980 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
11981 *io_gpio = 3;
11982 *io_port = 1;
11983 break;
11984 default:
11985 /* Don't override the io_gpio and io_port */
11986 break;
11987 }
11988}
11989
11990static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
11991 u32 shmem_base_path[],
11992 u32 shmem2_base_path[], u8 phy_index,
11993 u32 chip_id)
11994{
11995 s8 port, reset_gpio;
11996 u32 swap_val, swap_override;
11997 struct bnx2x_phy phy[PORT_MAX];
11998 struct bnx2x_phy *phy_blk[PORT_MAX];
11999 s8 port_of_path;
12000 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
12001 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
12002
12003 reset_gpio = MISC_REGISTERS_GPIO_1;
12004 port = 1;
12005
12006	/*
12007	 * Retrieve the gpio/port pair which controls the external PHY reset.
12008	 * Default is GPIO1, PORT1
12009	 */
12010 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
12011 (u8 *)&reset_gpio, (u8 *)&port);
12012
12013 /* Calculate the port based on port swap */
12014 port ^= (swap_val && swap_override);
12015
12016	/* Initiate PHY reset */
12017 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
12018 port);
12019 msleep(1);
12020 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
12021 port);
12022
12023 msleep(5);
12024
12025 /* PART1 - Reset both phys */
12026 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
12027 u32 shmem_base, shmem2_base;
12028
12029		/* In E2, the same phy is used for port0 of both paths */
12030 if (CHIP_IS_E1x(bp)) {
12031 shmem_base = shmem_base_path[0];
12032 shmem2_base = shmem2_base_path[0];
12033 port_of_path = port;
12034 } else {
12035 shmem_base = shmem_base_path[port];
12036 shmem2_base = shmem2_base_path[port];
12037 port_of_path = 0;
12038 }
12039
12040 /* Extract the ext phy address for the port */
12041 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
12042 port_of_path, &phy[port]) !=
12043 0) {
12044 DP(NETIF_MSG_LINK, "populate phy failed\n");
12045 return -EINVAL;
12046 }
12047 /* disable attentions */
12048 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
12049 port_of_path*4,
12050 (NIG_MASK_XGXS0_LINK_STATUS |
12051 NIG_MASK_XGXS0_LINK10G |
12052 NIG_MASK_SERDES0_LINK_STATUS |
12053 NIG_MASK_MI_INT));
12054
12055
12056 /* Reset the phy */
12057 bnx2x_cl45_write(bp, &phy[port],
12058 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
12059 }
12060
12061 /* Add delay of 150ms after reset */
12062 msleep(150);
12063 if (phy[PORT_0].addr & 0x1) {
12064 phy_blk[PORT_0] = &(phy[PORT_1]);
12065 phy_blk[PORT_1] = &(phy[PORT_0]);
12066 } else {
12067 phy_blk[PORT_0] = &(phy[PORT_0]);
12068 phy_blk[PORT_1] = &(phy[PORT_1]);
12069 }
12070 /* PART2 - Download firmware to both phys */
12071 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
12072 if (CHIP_IS_E1x(bp))
12073 port_of_path = port;
12074 else
12075 port_of_path = 0;
12076 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
12077 phy_blk[port]->addr);
12078 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
12079 port_of_path))
12080 return -EINVAL;
12081 /* Disable PHY transmitter output */
12082 bnx2x_cl45_write(bp, phy_blk[port],
12083 MDIO_PMA_DEVAD,
12084 MDIO_PMA_REG_TX_DISABLE, 1);
12085
12086 }
12087 return 0;
12088}
12089
12090static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
12091 u32 shmem2_base_path[], u8 phy_index,
12092 u32 ext_phy_type, u32 chip_id)
12093{
12094 int rc = 0;
12095
12096 switch (ext_phy_type) {
12097 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
12098 rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
12099 shmem2_base_path,
12100 phy_index, chip_id);
12101 break;
12102 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
12103 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
12104 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
12105 rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
12106 shmem2_base_path,
12107 phy_index, chip_id);
12108 break;
12109
12110 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
12111		/*
12112		 * GPIO1 affects both ports, so there's no need to pull
12113		 * it for a single port alone
12114		 */
12115 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
12116 shmem2_base_path,
12117 phy_index, chip_id);
12118 break;
12119 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
12120		/*
12121		 * The GPIO3s are linked, so both need to be toggled
12122		 * to obtain the required 2us pulse.
12123		 */
12124 rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
12125 break;
12126 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
12127 rc = -EINVAL;
12128 break;
12129 default:
12130 DP(NETIF_MSG_LINK,
12131 "ext_phy 0x%x common init not required\n",
12132 ext_phy_type);
12133 break;
12134 }
12135
12136 if (rc != 0)
12137 netdev_err(bp->dev, "Warning: PHY was not initialized,"
12138 " Port %d\n",
12139 0);
12140 return rc;
12141}
12142
12143int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
12144 u32 shmem2_base_path[], u32 chip_id)
12145{
12146 int rc = 0;
12147 u32 phy_ver, val;
12148 u8 phy_index = 0;
12149 u32 ext_phy_type, ext_phy_config;
12150 bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
12151 bnx2x_set_mdio_clk(bp, chip_id, PORT_1);
12152 DP(NETIF_MSG_LINK, "Begin common phy init\n");
12153 if (CHIP_IS_E3(bp)) {
12154 /* Enable EPIO */
12155 val = REG_RD(bp, MISC_REG_GEN_PURP_HWG);
12156 REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1);
12157 }
12158 /* Check if common init was already done */
12159 phy_ver = REG_RD(bp, shmem_base_path[0] +
12160 offsetof(struct shmem_region,
12161 port_mb[PORT_0].ext_phy_fw_version));
12162 if (phy_ver) {
12163 DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
12164 phy_ver);
12165 return 0;
12166 }
12167
12168 /* Read the ext_phy_type for arbitrary port(0) */
12169 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
12170 phy_index++) {
12171 ext_phy_config = bnx2x_get_ext_phy_config(bp,
12172 shmem_base_path[0],
12173 phy_index, 0);
12174 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
12175 rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
12176 shmem2_base_path,
12177 phy_index, ext_phy_type,
12178 chip_id);
12179 }
12180 return rc;
12181}
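
For orientation, a minimal caller sketch (hypothetical: the shmem bases and chip id are assumed to come from the driver's probe path, and on E2 each path would supply its own base rather than the duplicated one shown here):

	u32 shmem_bases[2], shmem2_bases[2];

	shmem_bases[0] = shmem_bases[1] = bp->common.shmem_base;
	shmem2_bases[0] = shmem2_bases[1] = bp->common.shmem2_base;
	if (bnx2x_common_init_phy(bp, shmem_bases, shmem2_bases,
				  bp->common.chip_id))
		BNX2X_ERR("common phy init failed\n");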
12182
12183static void bnx2x_check_over_curr(struct link_params *params,
12184 struct link_vars *vars)
12185{
12186 struct bnx2x *bp = params->bp;
12187 u32 cfg_pin;
12188 u8 port = params->port;
12189 u32 pin_val;
12190
12191 cfg_pin = (REG_RD(bp, params->shmem_base +
12192 offsetof(struct shmem_region,
12193 dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) &
12194 PORT_HW_CFG_E3_OVER_CURRENT_MASK) >>
12195 PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;
12196
12197 /* Ignore check if no external input PIN available */
12198 if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0)
12199 return;
12200
12201 if (!pin_val) {
12202 if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) {
12203 netdev_err(bp->dev, "Error: Power fault on Port %d has"
12204 " been detected and the power to "
12205 "that SFP+ module has been removed"
12206 " to prevent failure of the card."
12207 " Please remove the SFP+ module and"
12208 " restart the system to clear this"
12209 " error.\n",
12210 params->port);
12211 vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
12212 }
12213 } else
12214 vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
12215}
12216
12217static void bnx2x_analyze_link_error(struct link_params *params,
12218 struct link_vars *vars, u32 lss_status)
12219{
12220 struct bnx2x *bp = params->bp;
12221 /* Compare new value with previous value */
12222 u8 led_mode;
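	/* Normalize the flag to 0/1 so the XOR against lss_status below is
	 * a clean boolean comparison.
	 */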
12223 u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
12224
12225 if ((lss_status ^ half_open_conn) == 0)
12226 return;
12227
12228 /* If values differ */
12229 DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
12230 half_open_conn, lss_status);
12231
12232 /*
12233 * a. Update shmem->link_status accordingly
12234 * b. Update link_vars->link_up
12235 */
12236 if (lss_status) {
12237 DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
12238 vars->link_status &= ~LINK_STATUS_LINK_UP;
12239 vars->link_up = 0;
12240 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
12241 /*
12242 * Set LED mode to off since the PHY doesn't know about these
12243 * errors
12244 */
12245 led_mode = LED_MODE_OFF;
12246 } else {
12247 DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
12248 vars->link_status |= LINK_STATUS_LINK_UP;
12249 vars->link_up = 1;
12250 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
12251 led_mode = LED_MODE_OPER;
12252 }
12253 /* Update the LED according to the link state */
12254 bnx2x_set_led(params, vars, led_mode, SPEED_10000);
12255
12256 /* Update link status in the shared memory */
12257 bnx2x_update_mng(params, vars->link_status);
12258
12259 /* C. Trigger General Attention */
12260 vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
12261 bnx2x_notify_link_changed(bp);
12262}
12263
12264/******************************************************************************
12265* Description:
12266* This function checks for a half-opened connection change indication.
12267* When such a change occurs, it calls bnx2x_analyze_link_error() to check
12268* whether Remote Fault is set or cleared. Reception of a remote fault
12269* status message in the MAC indicates that the peer's MAC has detected
12270* a fault, for example due to a break in the TX side of the fiber.
12271*
12272******************************************************************************/
12273static void bnx2x_check_half_open_conn(struct link_params *params,
12274 struct link_vars *vars)
12275{
12276 struct bnx2x *bp = params->bp;
12277 u32 lss_status = 0;
12278 u32 mac_base;
12279	/* Only proceed when the link is physically up at 10G */
12280 if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
12281 return;
12282
12283 if (CHIP_IS_E3(bp) &&
12284 (REG_RD(bp, MISC_REG_RESET_REG_2) &
12285 (MISC_REGISTERS_RESET_REG_2_XMAC))) {
12286 /* Check E3 XMAC */
12287 /*
12288 * Note that link speed cannot be queried here, since it may be
12289 * zero while link is down. In case UMAC is active, LSS will
12290 * simply not be set
12291 */
12292 mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
12293
12294		/* Clear sticky bits (requires a rising edge) */
12295 REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
12296 REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
12297 XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
12298 XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
12299 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
12300 lss_status = 1;
12301
12302 bnx2x_analyze_link_error(params, vars, lss_status);
12303 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
12304 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
12305 /* Check E1X / E2 BMAC */
12306 u32 lss_status_reg;
12307 u32 wb_data[2];
12308 mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
12309 NIG_REG_INGRESS_BMAC0_MEM;
12310 /* Read BIGMAC_REGISTER_RX_LSS_STATUS */
12311 if (CHIP_IS_E2(bp))
12312 lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
12313 else
12314 lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS;
12315
12316 REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
12317 lss_status = (wb_data[0] > 0);
12318
12319 bnx2x_analyze_link_error(params, vars, lss_status);
12320 }
12321}
12322
12323void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
12324{
12325	struct bnx2x *bp;
12326	u16 phy_idx;
12327
12328	if (!params) /* no valid bp to log through */
12329		return;
12330	bp = params->bp;
12331
12332 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
12333 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
12334 bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
12335 bnx2x_check_half_open_conn(params, vars);
12336 break;
12337 }
12338 }
12339
12340 if (CHIP_IS_E3(bp))
12341 bnx2x_check_over_curr(params, vars);
12342}
12343
12344u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
12345{
12346 u8 phy_index;
12347 struct bnx2x_phy phy;
12348 for (phy_index = INT_PHY; phy_index < MAX_PHYS;
12349 phy_index++) {
12350 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
12351 0, &phy) != 0) {
12352 DP(NETIF_MSG_LINK, "populate phy failed\n");
12353 return 0;
12354 }
12355
12356 if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
12357 return 1;
12358 }
12359 return 0;
12360}
12361
12362u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
12363 u32 shmem_base,
12364 u32 shmem2_base,
12365 u8 port)
12366{
12367 u8 phy_index, fan_failure_det_req = 0;
12368 struct bnx2x_phy phy;
12369 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
12370 phy_index++) {
12371 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
12372 port, &phy)
12373 != 0) {
12374 DP(NETIF_MSG_LINK, "populate phy failed\n");
12375 return 0;
12376 }
12377 fan_failure_det_req |= (phy.flags &
12378 FLAGS_FAN_FAILURE_DET_REQ);
12379 }
12380 return fan_failure_det_req;
12381}
12382
12383void bnx2x_hw_reset_phy(struct link_params *params)
12384{
12385 u8 phy_index;
12386 struct bnx2x *bp = params->bp;
12387 bnx2x_update_mng(params, 0);
12388 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
12389 (NIG_MASK_XGXS0_LINK_STATUS |
12390 NIG_MASK_XGXS0_LINK10G |
12391 NIG_MASK_SERDES0_LINK_STATUS |
12392 NIG_MASK_MI_INT));
12393
12394 for (phy_index = INT_PHY; phy_index < MAX_PHYS;
12395 phy_index++) {
12396 if (params->phy[phy_index].hw_reset) {
12397 params->phy[phy_index].hw_reset(
12398 &params->phy[phy_index],
12399 params);
12400 params->phy[phy_index] = phy_null;
12401 }
12402 }
12403}
12404
12405void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
12406 u32 chip_id, u32 shmem_base, u32 shmem2_base,
12407 u8 port)
12408{
12409 u8 gpio_num = 0xff, gpio_port = 0xff, phy_index;
12410 u32 val;
12411 u32 offset, aeu_mask, swap_val, swap_override, sync_offset;
12412 if (CHIP_IS_E3(bp)) {
12413 if (bnx2x_get_mod_abs_int_cfg(bp, chip_id,
12414 shmem_base,
12415 port,
12416 &gpio_num,
12417 &gpio_port) != 0)
12418 return;
12419 } else {
12420 struct bnx2x_phy phy;
12421 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
12422 phy_index++) {
12423 if (bnx2x_populate_phy(bp, phy_index, shmem_base,
12424 shmem2_base, port, &phy)
12425 != 0) {
12426 DP(NETIF_MSG_LINK, "populate phy failed\n");
12427 return;
12428 }
12429 if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
12430 gpio_num = MISC_REGISTERS_GPIO_3;
12431 gpio_port = port;
12432 break;
12433 }
12434 }
12435 }
12436
12437 if (gpio_num == 0xff)
12438 return;
12439
12440 /* Set GPIO3 to trigger SFP+ module insertion/removal */
12441 bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port);
12442
12443 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
12444 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
12445 gpio_port ^= (swap_val && swap_override);
12446
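	/* Each port owns four consecutive GPIO attention bits, so
	 * (gpio_port << 2) selects the port's nibble and gpio_num the bit
	 * within it.
	 */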
12447 vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
12448 (gpio_num + (gpio_port << 2));
12449
12450 sync_offset = shmem_base +
12451 offsetof(struct shmem_region,
12452 dev_info.port_hw_config[port].aeu_int_mask);
12453 REG_WR(bp, sync_offset, vars->aeu_int_mask);
12454
12455 DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n",
12456 gpio_num, gpio_port, vars->aeu_int_mask);
12457
12458 if (port == 0)
12459 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
12460 else
12461 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
12462
12463 /* Open appropriate AEU for interrupts */
12464 aeu_mask = REG_RD(bp, offset);
12465 aeu_mask |= vars->aeu_int_mask;
12466 REG_WR(bp, offset, aeu_mask);
12467
12468 /* Enable the GPIO to trigger interrupt */
12469 val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
12470 val |= 1 << (gpio_num + (gpio_port << 2));
12471 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
12472}
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
new file mode 100644
index 00000000000..c12db6da213
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -0,0 +1,493 @@
1/* Copyright 2008-2011 Broadcom Corporation
2 *
3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you
5 * under the terms of the GNU General Public License version 2, available
6 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
7 *
8 * Notwithstanding the above, under no circumstances may you combine this
9 * software in any way with any other Broadcom software provided under a
10 * license other than the GPL, without Broadcom's express prior written
11 * consent.
12 *
13 * Written by Yaniv Rosner
14 *
15 */
16
17#ifndef BNX2X_LINK_H
18#define BNX2X_LINK_H
19
20
21
22/***********************************************************/
23/* Defines */
24/***********************************************************/
25#define DEFAULT_PHY_DEV_ADDR 3
26#define E2_DEFAULT_PHY_DEV_ADDR 5
27
28
29
30#define BNX2X_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO
31#define BNX2X_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX
32#define BNX2X_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX
33#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
34#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
35
36#define NET_SERDES_IF_XFI 1
37#define NET_SERDES_IF_SFI 2
38#define NET_SERDES_IF_KR 3
39#define NET_SERDES_IF_DXGXS 4
40
41#define SPEED_AUTO_NEG 0
42#define SPEED_20000 20000
43
44#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
45#define SFP_EEPROM_VENDOR_NAME_SIZE 16
46#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
47#define SFP_EEPROM_VENDOR_OUI_SIZE 3
48#define SFP_EEPROM_PART_NO_ADDR 0x28
49#define SFP_EEPROM_PART_NO_SIZE 16
50#define SFP_EEPROM_REVISION_ADDR 0x38
51#define SFP_EEPROM_REVISION_SIZE 4
52#define SFP_EEPROM_SERIAL_ADDR 0x44
53#define SFP_EEPROM_SERIAL_SIZE 16
54#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
55#define SFP_EEPROM_DATE_SIZE 6
56#define PWR_FLT_ERR_MSG_LEN 250
57
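
Illustration only (not part of this header): the EEPROM offsets above pair with bnx2x_read_sfp_module_eeprom(), declared further down. A hedged sketch of dumping the vendor name, assuming phy and params were already populated by bnx2x_phy_probe():

	struct bnx2x *bp = params->bp;
	u8 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE + 1];

	if (!bnx2x_read_sfp_module_eeprom(phy, params,
					  SFP_EEPROM_VENDOR_NAME_ADDR,
					  SFP_EEPROM_VENDOR_NAME_SIZE,
					  vendor_name)) {
		vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
		DP(NETIF_MSG_LINK, "SFP+ vendor: %s\n", vendor_name);
	}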
58#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
59 ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
60#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
61 (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
62 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
63#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
64 ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
65
66/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
67#define SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1)
68/* Single Media board contains single external phy */
69#define SINGLE_MEDIA(params) (params->num_phys == 2)
70/* Dual Media board contains two external phy with different media */
71#define DUAL_MEDIA(params) (params->num_phys == 3)
72
73#define FW_PARAM_PHY_ADDR_MASK 0x000000FF
74#define FW_PARAM_PHY_TYPE_MASK 0x0000FF00
75#define FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000
76#define FW_PARAM_MDIO_CTRL_OFFSET 16
77#define FW_PARAM_PHY_ADDR(fw_param) (fw_param & \
78 FW_PARAM_PHY_ADDR_MASK)
79#define FW_PARAM_PHY_TYPE(fw_param) (fw_param & \
80 FW_PARAM_PHY_TYPE_MASK)
81#define FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \
82 FW_PARAM_MDIO_CTRL_MASK) >> \
83 FW_PARAM_MDIO_CTRL_OFFSET)
84#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
85 (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
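/* Illustration: FW_PARAM_SET(addr, type, mdio) packs the three fields into
 * a single u32, which the FW_PARAM_PHY_ADDR/PHY_TYPE/MDIO_CTRL macros above
 * unpack again.
 */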
86
87
88#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
89#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
90
91#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
92/***********************************************************/
93/* Structs */
94/***********************************************************/
95#define INT_PHY 0
96#define EXT_PHY1 1
97#define EXT_PHY2 2
98#define MAX_PHYS 3
99
100/* Same configuration is shared between the XGXS and the first external phy */
101#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
102#define LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == INT_PHY) ? \
103 0 : (_phy_idx - 1))
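/* i.e. INT_PHY and EXT_PHY1 both map to config index 0; EXT_PHY2 maps
 * to index 1, matching the shared-configuration note above.
 */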
104/***********************************************************/
105/* bnx2x_phy struct */
106/* Defines the required arguments and function per phy */
107/***********************************************************/
108struct link_vars;
109struct link_params;
110struct bnx2x_phy;
111
112typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
113 struct link_vars *vars);
114typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
115 struct link_vars *vars);
116typedef void (*link_reset_t)(struct bnx2x_phy *phy,
117 struct link_params *params);
118typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
119 struct link_params *params);
120typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
121typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
122typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
123 struct link_params *params, u8 mode);
124typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
125 struct link_params *params, u32 action);
126
127struct bnx2x_phy {
128 u32 type;
129
130 /* Loaded during init */
131 u8 addr;
132 u8 def_md_devad;
133 u16 flags;
134 /* Require HW lock */
135#define FLAGS_HW_LOCK_REQUIRED (1<<0)
136 /* No Over-Current detection */
137#define FLAGS_NOC (1<<1)
138 /* Fan failure detection required */
139#define FLAGS_FAN_FAILURE_DET_REQ (1<<2)
140 /* Initialize first the XGXS and only then the phy itself */
141#define FLAGS_INIT_XGXS_FIRST (1<<3)
142#define FLAGS_WC_DUAL_MODE (1<<4)
143#define FLAGS_4_PORT_MODE (1<<5)
144#define FLAGS_REARM_LATCH_SIGNAL (1<<6)
145#define FLAGS_SFP_NOT_APPROVED (1<<7)
146#define FLAGS_MDC_MDIO_WA (1<<8)
147#define FLAGS_DUMMY_READ (1<<9)
148#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
149#define FLAGS_TX_ERROR_CHECK (1<<12)
150
151 /* preemphasis values for the rx side */
152 u16 rx_preemphasis[4];
153
154 /* preemphasis values for the tx side */
155 u16 tx_preemphasis[4];
156
157	/* EMAC address used for MDIO access */
158 u32 mdio_ctrl;
159
160 u32 supported;
161
162 u32 media_type;
163#define ETH_PHY_UNSPECIFIED 0x0
164#define ETH_PHY_SFP_FIBER 0x1
165#define ETH_PHY_XFP_FIBER 0x2
166#define ETH_PHY_DA_TWINAX 0x3
167#define ETH_PHY_BASE_T 0x4
168#define ETH_PHY_KR 0xf0
169#define ETH_PHY_CX4 0xf1
170#define ETH_PHY_NOT_PRESENT 0xff
171
172 /* The address in which version is located*/
173 u32 ver_addr;
174
175 u16 req_flow_ctrl;
176
177 u16 req_line_speed;
178
179 u32 speed_cap_mask;
180
181 u16 req_duplex;
182 u16 rsrv;
183 /* Called per phy/port init, and it configures LASI, speed, autoneg,
184 duplex, flow control negotiation, etc. */
185 config_init_t config_init;
186
187	/* Called on interrupt. It determines the link state and speed */
188 read_status_t read_status;
189
190 /* Called when driver is unloading. Should reset the phy */
191 link_reset_t link_reset;
192
193 /* Set the loopback configuration for the phy */
194 config_loopback_t config_loopback;
195
196 /* Format the given raw number into str up to len */
197 format_fw_ver_t format_fw_ver;
198
199 /* Reset the phy (both ports) */
200 hw_reset_t hw_reset;
201
202 /* Set link led mode (on/off/oper)*/
203 set_link_led_t set_link_led;
204
205 /* PHY Specific tasks */
206 phy_specific_func_t phy_specific_func;
207#define DISABLE_TX 1
208#define ENABLE_TX 2
209};
210
211/* Input parameters to the CLC */
212struct link_params {
213
214 u8 port;
215
216 /* Default / User Configuration */
217 u8 loopback_mode;
218#define LOOPBACK_NONE 0
219#define LOOPBACK_EMAC 1
220#define LOOPBACK_BMAC 2
221#define LOOPBACK_XGXS 3
222#define LOOPBACK_EXT_PHY 4
223#define LOOPBACK_EXT 5
224#define LOOPBACK_UMAC 6
225#define LOOPBACK_XMAC 7
226
227 /* Device parameters */
228 u8 mac_addr[6];
229
230 u16 req_duplex[LINK_CONFIG_SIZE];
231 u16 req_flow_ctrl[LINK_CONFIG_SIZE];
232
233 u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */
234
235 /* shmem parameters */
236 u32 shmem_base;
237 u32 shmem2_base;
238 u32 speed_cap_mask[LINK_CONFIG_SIZE];
239 u32 switch_cfg;
240#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
241#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
242#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
243
244 u32 lane_config;
245
246 /* Phy register parameter */
247 u32 chip_id;
248
249 /* features */
250 u32 feature_config_flags;
251#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
252#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
253#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
254#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
255#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
256#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
257 /* Will be populated during common init */
258 struct bnx2x_phy phy[MAX_PHYS];
259
260 /* Will be populated during common init */
261 u8 num_phys;
262
263 u8 rsrv;
264 u16 hw_led_mode; /* part of the hw_config read from the shmem */
265 u32 multi_phy_config;
266
267 /* Device pointer passed to all callback functions */
268 struct bnx2x *bp;
269 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
270 req_flow_ctrl is set to AUTO */
271};
272
273/* Output parameters */
274struct link_vars {
275 u8 phy_flags;
276#define PHY_XGXS_FLAG (1<<0)
277#define PHY_SGMII_FLAG (1<<1)
278#define PHY_PHYSICAL_LINK_FLAG (1<<2)
279#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
280#define PHY_OVER_CURRENT_FLAG (1<<4)
281
282 u8 mac_type;
283#define MAC_TYPE_NONE 0
284#define MAC_TYPE_EMAC 1
285#define MAC_TYPE_BMAC 2
286#define MAC_TYPE_UMAC 3
287#define MAC_TYPE_XMAC 4
288
289 u8 phy_link_up; /* internal phy link indication */
290 u8 link_up;
291
292 u16 line_speed;
293 u16 duplex;
294
295 u16 flow_ctrl;
296 u16 ieee_fc;
297
298 /* The same definitions as the shmem parameter */
299 u32 link_status;
300 u8 fault_detected;
301 u8 rsrv1;
302 u16 periodic_flags;
303#define PERIODIC_FLAGS_LINK_EVENT 0x0001
304
305 u32 aeu_int_mask;
306};
307
308/***********************************************************/
309/* Functions */
310/***********************************************************/
311int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
312
313/* Reset the link. Should be called when the driver or interface goes
314   down. Before a phy firmware upgrade, reset_ext_phy should be set
315   to 0 */
316int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
317 u8 reset_ext_phy);
318
319/* bnx2x_link_update should be called upon link interrupt */
320int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
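
A hedged sketch of the expected call order (illustrative only; bp->link_params and bp->link_vars are assumed to be the driver's embedded copies):

	bnx2x_phy_probe(&bp->link_params);                /* discover phys */
	bnx2x_phy_init(&bp->link_params, &bp->link_vars); /* bring link up */
	/* on a link interrupt: */
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	/* on interface down: */
	bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);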
321
322/* Use the following phy functions to read from / write to the external phy.
323   To read/write internal phy registers instead, use
324   DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
325   the register */
326int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
327 u8 devad, u16 reg, u16 *ret_val);
328
329int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
330 u8 devad, u16 reg, u16 val);
331
332/* Reads the link_status from the shmem
333   and updates the link vars accordingly */
334void bnx2x_link_status_update(struct link_params *input,
335 struct link_vars *output);
336/* returns string representing the fw_version of the external phy */
337int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
338 u8 *version, u16 len);
339
340/* Set/Unset the led
341   Basically, the CLC takes care of the led for the link, but in case one
342   needs to set/unset the led manually, set "mode" to LED_MODE_OPER to
343   blink the led, and LED_MODE_OFF to set the led off. */
344int bnx2x_set_led(struct link_params *params,
345 struct link_vars *vars, u8 mode, u32 speed);
346#define LED_MODE_OFF 0
347#define LED_MODE_ON 1
348#define LED_MODE_OPER 2
349#define LED_MODE_FRONT_PANEL_OFF 3
350
351/* bnx2x_handle_module_detect_int should be called upon module detection
352 interrupt */
353void bnx2x_handle_module_detect_int(struct link_params *params);
354
355/* Get the actual link status. In case it returns 0, link is up,
356   otherwise link is down */
357int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
358 u8 is_serdes);
359
360/* One-time initialization for external phy after power up */
361int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
362 u32 shmem2_base_path[], u32 chip_id);
363
364/* Reset the external PHY using GPIO */
365void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
366
367/* Soft-reset the SFX7101 external PHY */
368void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
369
370/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
371int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
372 struct link_params *params, u16 addr,
373 u8 byte_cnt, u8 *o_buf);
374
375void bnx2x_hw_reset_phy(struct link_params *params);
376
377/* Checks if HW lock is required for this phy/board type */
378u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
379 u32 shmem2_base);
380
381/* Check swap bit and adjust PHY order */
382u32 bnx2x_phy_selection(struct link_params *params);
383
384/* Probe the phys on board, and populate them in "params" */
385int bnx2x_phy_probe(struct link_params *params);
386
387/* Checks if fan failure detection is required on one of the phys on board */
388u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
389 u32 shmem2_base, u8 port);
390
391
392
393/* DCBX structs */
394
395/* Number of maximum COS per chip */
396#define DCBX_E2E3_MAX_NUM_COS (2)
397#define DCBX_E3B0_MAX_NUM_COS_PORT0 (6)
398#define DCBX_E3B0_MAX_NUM_COS_PORT1 (3)
399#define DCBX_E3B0_MAX_NUM_COS ( \
400 MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \
401 DCBX_E3B0_MAX_NUM_COS_PORT1))
402
403#define DCBX_MAX_NUM_COS ( \
404 MAXVAL(DCBX_E3B0_MAX_NUM_COS, \
405 DCBX_E2E3_MAX_NUM_COS))
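/* With the values above, this evaluates to max(max(6, 3), 2) = 6 */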
406
407/* PFC port configuration params */
408struct bnx2x_nig_brb_pfc_port_params {
409 /* NIG */
410 u32 pause_enable;
411 u32 llfc_out_en;
412 u32 llfc_enable;
413 u32 pkt_priority_to_cos;
414 u8 num_of_rx_cos_priority_mask;
415 u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
416 u32 llfc_high_priority_classes;
417 u32 llfc_low_priority_classes;
418 /* BRB */
419 u32 cos0_pauseable;
420 u32 cos1_pauseable;
421};
422
423
424/* ETS port configuration params */
425struct bnx2x_ets_bw_params {
426 u8 bw;
427};
428
429struct bnx2x_ets_sp_params {
430 /**
431 * valid values are 0 - 5. 0 is highest strict priority.
432 * There can't be two COS's with the same pri.
433 */
434 u8 pri;
435};
436
437enum bnx2x_cos_state {
438 bnx2x_cos_state_strict = 0,
439 bnx2x_cos_state_bw = 1,
440};
441
442struct bnx2x_ets_cos_params {
443	enum bnx2x_cos_state state;
444 union {
445 struct bnx2x_ets_bw_params bw_params;
446 struct bnx2x_ets_sp_params sp_params;
447 } params;
448};
449
450struct bnx2x_ets_params {
451 u8 num_of_cos; /* Number of valid COS entries*/
452 struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
453};
454
455/**
456 * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
457 * when link is already up
458 */
459int bnx2x_update_pfc(struct link_params *params,
460 struct link_vars *vars,
461 struct bnx2x_nig_brb_pfc_port_params *pfc_params);
462
463
464/* Used to configure the ETS to disable */
465int bnx2x_ets_disabled(struct link_params *params,
466 struct link_vars *vars);
467
468/* Used to configure the ETS to BW limited */
469void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
470 const u32 cos1_bw);
471
472/* Used to configure the ETS to strict */
473int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
474
475
476/* Configure the COS to ETS according to BW and SP settings.*/
477int bnx2x_ets_e3b0_config(const struct link_params *params,
478 const struct link_vars *vars,
479 const struct bnx2x_ets_params *ets_params);
480/* Read pfc statistic*/
481void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
482 u32 pfc_frames_sent[2],
483 u32 pfc_frames_received[2]);
484void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
485 u32 chip_id, u32 shmem_base, u32 shmem2_base,
486 u8 port);
487
488int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
489 struct link_params *params);
490
491void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
492
493#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
new file mode 100644
index 00000000000..15f800085bb
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -0,0 +1,11624 @@
1/* bnx2x_main.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/dma-mapping.h>
33#include <linux/bitops.h>
34#include <linux/irq.h>
35#include <linux/delay.h>
36#include <asm/byteorder.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
40#include <linux/if_vlan.h>
41#include <net/ip.h>
42#include <net/ipv6.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
45#include <net/ip6_checksum.h>
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
48#include <linux/crc32c.h>
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
51#include <linux/io.h>
52#include <linux/stringify.h>
53#include <linux/vmalloc.h>
54
55#include "bnx2x.h"
56#include "bnx2x_init.h"
57#include "bnx2x_init_ops.h"
58#include "bnx2x_cmn.h"
59#include "bnx2x_dcb.h"
60#include "bnx2x_sp.h"
61
62#include <linux/firmware.h>
63#include "bnx2x_fw_file_hdr.h"
64/* FW files */
65#define FW_FILE_VERSION \
66 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
67 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
68 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
69 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
70#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
71#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
72#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
73
74/* Time in jiffies before concluding the transmitter is hung */
75#define TX_TIMEOUT (5*HZ)
76
77static char version[] __devinitdata =
78 "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
79 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
80
81MODULE_AUTHOR("Eliezer Tamir");
82MODULE_DESCRIPTION("Broadcom NetXtreme II "
83 "BCM57710/57711/57711E/"
84 "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
85 "57840/57840_MF Driver");
86MODULE_LICENSE("GPL");
87MODULE_VERSION(DRV_MODULE_VERSION);
88MODULE_FIRMWARE(FW_FILE_NAME_E1);
89MODULE_FIRMWARE(FW_FILE_NAME_E1H);
90MODULE_FIRMWARE(FW_FILE_NAME_E2);
91
92static int multi_mode = 1;
93module_param(multi_mode, int, 0);
94MODULE_PARM_DESC(multi_mode, " Multi queue mode "
95 "(0 Disable; 1 Enable (default))");
96
97int num_queues;
98module_param(num_queues, int, 0);
99MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
100 " (default is as a number of CPUs)");
101
102static int disable_tpa;
103module_param(disable_tpa, int, 0);
104MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
105
106#define INT_MODE_INTx 1
107#define INT_MODE_MSI 2
108static int int_mode;
109module_param(int_mode, int, 0);
110MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
111 "(1 INT#x; 2 MSI)");
112
113static int dropless_fc;
114module_param(dropless_fc, int, 0);
115MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
116
117static int poll;
118module_param(poll, int, 0);
119MODULE_PARM_DESC(poll, " Use polling (for debug)");
120
121static int mrrs = -1;
122module_param(mrrs, int, 0);
123MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
124
125static int debug;
126module_param(debug, int, 0);
127MODULE_PARM_DESC(debug, " Default debug msglevel");
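/* Illustrative usage (parameter names as defined above):
 *   modprobe bnx2x int_mode=2 disable_tpa=1
 * forces MSI interrupts and turns TPA (LRO) off.
 */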
128
129
130
131struct workqueue_struct *bnx2x_wq;
132
133enum bnx2x_board_type {
134 BCM57710 = 0,
135 BCM57711,
136 BCM57711E,
137 BCM57712,
138 BCM57712_MF,
139 BCM57800,
140 BCM57800_MF,
141 BCM57810,
142 BCM57810_MF,
143 BCM57840,
144 BCM57840_MF
145};
146
147/* indexed by board_type, above */
148static struct {
149 char *name;
150} board_info[] __devinitdata = {
151 { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
152 { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
153 { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
154 { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
155 { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
156 { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
157 { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
158 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
159 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
160 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
161 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit "
162 "Ethernet Multi Function"}
163};
164
165#ifndef PCI_DEVICE_ID_NX2_57710
166#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
167#endif
168#ifndef PCI_DEVICE_ID_NX2_57711
169#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
170#endif
171#ifndef PCI_DEVICE_ID_NX2_57711E
172#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
173#endif
174#ifndef PCI_DEVICE_ID_NX2_57712
175#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
176#endif
177#ifndef PCI_DEVICE_ID_NX2_57712_MF
178#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
179#endif
180#ifndef PCI_DEVICE_ID_NX2_57800
181#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
182#endif
183#ifndef PCI_DEVICE_ID_NX2_57800_MF
184#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
185#endif
186#ifndef PCI_DEVICE_ID_NX2_57810
187#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
188#endif
189#ifndef PCI_DEVICE_ID_NX2_57810_MF
190#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
191#endif
192#ifndef PCI_DEVICE_ID_NX2_57840
193#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840
194#endif
195#ifndef PCI_DEVICE_ID_NX2_57840_MF
196#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
197#endif
198static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
199 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
200 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
201 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
202 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
203 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
204 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
205 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
206 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
207 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
208 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
209 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
210 { 0 }
211};
212
213MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
214
215/****************************************************************************
216* General service functions
217****************************************************************************/
218
219static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
220 u32 addr, dma_addr_t mapping)
221{
222 REG_WR(bp, addr, U64_LO(mapping));
223 REG_WR(bp, addr + 4, U64_HI(mapping));
224}
225
226static inline void storm_memset_spq_addr(struct bnx2x *bp,
227 dma_addr_t mapping, u16 abs_fid)
228{
229 u32 addr = XSEM_REG_FAST_MEMORY +
230 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
231
232 __storm_memset_dma_mapping(bp, addr, mapping);
233}
234
235static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
236 u16 pf_id)
237{
238 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
239 pf_id);
240 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
241 pf_id);
242 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
243 pf_id);
244 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
245 pf_id);
246}
247
248static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
249 u8 enable)
250{
251 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
252 enable);
253 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
254 enable);
255 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
256 enable);
257 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
258 enable);
259}
260
261static inline void storm_memset_eq_data(struct bnx2x *bp,
262 struct event_ring_data *eq_data,
263 u16 pfid)
264{
265 size_t size = sizeof(struct event_ring_data);
266
267 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
268
269 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
270}
271
272static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
273 u16 pfid)
274{
275 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
276 REG_WR16(bp, addr, eq_prod);
277}
278
279/* used only at init
280 * locking is done by mcp
281 */
282static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
283{
284 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
285 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
286 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
287 PCICFG_VENDOR_ID_OFFSET);
288}
289
290static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
291{
292 u32 val;
293
294 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
295 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
296 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
297 PCICFG_VENDOR_ID_OFFSET);
298
299 return val;
300}
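/* Both indirect helpers program the GRC window through config space and
 * then park it back at PCICFG_VENDOR_ID_OFFSET, leaving the window
 * pointing at a benign location between accesses.
 */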
301
302#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
303#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
304#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
305#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
306#define DMAE_DP_DST_NONE "dst_addr [none]"
307
308static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
309 int msglvl)
310{
311 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
312
313 switch (dmae->opcode & DMAE_COMMAND_DST) {
314 case DMAE_CMD_DST_PCI:
315 if (src_type == DMAE_CMD_SRC_PCI)
316 DP(msglvl, "DMAE: opcode 0x%08x\n"
317 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
318 "comp_addr [%x:%08x], comp_val 0x%08x\n",
319 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
320 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
321 dmae->comp_addr_hi, dmae->comp_addr_lo,
322 dmae->comp_val);
323 else
324 DP(msglvl, "DMAE: opcode 0x%08x\n"
325 "src [%08x], len [%d*4], dst [%x:%08x]\n"
326 "comp_addr [%x:%08x], comp_val 0x%08x\n",
327 dmae->opcode, dmae->src_addr_lo >> 2,
328 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
329 dmae->comp_addr_hi, dmae->comp_addr_lo,
330 dmae->comp_val);
331 break;
332 case DMAE_CMD_DST_GRC:
333 if (src_type == DMAE_CMD_SRC_PCI)
334 DP(msglvl, "DMAE: opcode 0x%08x\n"
335 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
336 "comp_addr [%x:%08x], comp_val 0x%08x\n",
337 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
338 dmae->len, dmae->dst_addr_lo >> 2,
339 dmae->comp_addr_hi, dmae->comp_addr_lo,
340 dmae->comp_val);
341 else
342 DP(msglvl, "DMAE: opcode 0x%08x\n"
343 "src [%08x], len [%d*4], dst [%08x]\n"
344 "comp_addr [%x:%08x], comp_val 0x%08x\n",
345 dmae->opcode, dmae->src_addr_lo >> 2,
346 dmae->len, dmae->dst_addr_lo >> 2,
347 dmae->comp_addr_hi, dmae->comp_addr_lo,
348 dmae->comp_val);
349 break;
350 default:
351 if (src_type == DMAE_CMD_SRC_PCI)
352 DP(msglvl, "DMAE: opcode 0x%08x\n"
353 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
354 "dst_addr [none]\n"
355 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
356 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
357 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
358 dmae->comp_val);
359 else
360 DP(msglvl, "DMAE: opcode 0x%08x\n"
361 DP_LEVEL "src_addr [%08x] len [%d * 4] "
362 "dst_addr [none]\n"
363 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
364 dmae->opcode, dmae->src_addr_lo >> 2,
365 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
366 dmae->comp_val);
367 break;
368 }
369
370}
371
372/* copy command into DMAE command memory and set DMAE command go */
373void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
374{
375 u32 cmd_offset;
376 int i;
377
378 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
379 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
380 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
381
382 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
383 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
384 }
385 REG_WR(bp, dmae_reg_go_c[idx], 1);
386}
387
388u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
389{
390 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
391 DMAE_CMD_C_ENABLE);
392}
393
394u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
395{
396 return opcode & ~DMAE_CMD_SRC_RESET;
397}
398
399u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
400 bool with_comp, u8 comp_type)
401{
402 u32 opcode = 0;
403
404 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
405 (dst_type << DMAE_COMMAND_DST_SHIFT));
406
407 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
408
409 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
410 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
411 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
412 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
413
414#ifdef __BIG_ENDIAN
415 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
416#else
417 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
418#endif
419 if (with_comp)
420 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
421 return opcode;
422}
423
424static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
425 struct dmae_command *dmae,
426 u8 src_type, u8 dst_type)
427{
428 memset(dmae, 0, sizeof(struct dmae_command));
429
430 /* set the opcode */
431 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
432 true, DMAE_COMP_PCI);
433
434 /* fill in the completion parameters */
435 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
436 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
437 dmae->comp_val = DMAE_COMP_VAL;
438}
439
440/* issue a dmae command over the init-channel and wait for completion */
441static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
442 struct dmae_command *dmae)
443{
444 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
445 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
446 int rc = 0;
447
448 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
449 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
450 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
451
452	/*
453	 * Lock the dmae channel. Disable BHs to prevent a deadlock,
454	 * since this code is called both from syscall context and
455	 * from the ndo_set_rx_mode() flow, which may run in BH context.
456	 */
457 spin_lock_bh(&bp->dmae_lock);
458
459 /* reset completion */
460 *wb_comp = 0;
461
462 /* post the command on the channel used for initializations */
463 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
464
465 /* wait for completion */
466 udelay(5);
467 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
468 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
469
470 if (!cnt) {
471 BNX2X_ERR("DMAE timeout!\n");
472 rc = DMAE_TIMEOUT;
473 goto unlock;
474 }
475 cnt--;
476 udelay(50);
477 }
478 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
479 BNX2X_ERR("DMAE PCI error!\n");
480 rc = DMAE_PCI_ERROR;
481 }
482
483 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
484 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
485 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
486
487unlock:
488 spin_unlock_bh(&bp->dmae_lock);
489 return rc;
490}
491
492void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
493 u32 len32)
494{
495 struct dmae_command dmae;
496
497 if (!bp->dmae_ready) {
498 u32 *data = bnx2x_sp(bp, wb_data[0]);
499
500 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
501 " using indirect\n", dst_addr, len32);
502 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
503 return;
504 }
505
506 /* set opcode and fixed command fields */
507 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
508
509 /* fill in addresses and len */
510 dmae.src_addr_lo = U64_LO(dma_addr);
511 dmae.src_addr_hi = U64_HI(dma_addr);
512 dmae.dst_addr_lo = dst_addr >> 2;
513 dmae.dst_addr_hi = 0;
514 dmae.len = len32;
515
516 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
517
518 /* issue the command and wait for completion */
519 bnx2x_issue_dmae_with_comp(bp, &dmae);
520}
521
522void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
523{
524 struct dmae_command dmae;
525
526 if (!bp->dmae_ready) {
527 u32 *data = bnx2x_sp(bp, wb_data[0]);
528 int i;
529
530 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
531 " using indirect\n", src_addr, len32);
532 for (i = 0; i < len32; i++)
533 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
534 return;
535 }
536
537 /* set opcode and fixed command fields */
538 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
539
540 /* fill in addresses and len */
541 dmae.src_addr_lo = src_addr >> 2;
542 dmae.src_addr_hi = 0;
543 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
544 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
545 dmae.len = len32;
546
547 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
548
549 /* issue the command and wait for completion */
550 bnx2x_issue_dmae_with_comp(bp, &dmae);
551}
552
553static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
554 u32 addr, u32 len)
555{
556 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
557 int offset = 0;
558
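	/* len counts 32-bit words while the addresses advance in bytes,
	 * hence the "* 4" when stepping offset below.
	 */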
559 while (len > dmae_wr_max) {
560 bnx2x_write_dmae(bp, phys_addr + offset,
561 addr + offset, dmae_wr_max);
562 offset += dmae_wr_max * 4;
563 len -= dmae_wr_max;
564 }
565
566 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
567}
568
569/* used only for slowpath so not inlined */
570static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
571{
572 u32 wb_write[2];
573
574 wb_write[0] = val_hi;
575 wb_write[1] = val_lo;
576 REG_WR_DMAE(bp, reg, wb_write, 2);
577}
578
579#ifdef USE_WB_RD
580static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
581{
582 u32 wb_data[2];
583
584 REG_RD_DMAE(bp, reg, wb_data, 2);
585
586 return HILO_U64(wb_data[0], wb_data[1]);
587}
588#endif
589
590static int bnx2x_mc_assert(struct bnx2x *bp)
591{
592 char last_idx;
593 int i, rc = 0;
594 u32 row0, row1, row2, row3;
595
596 /* XSTORM */
597 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
598 XSTORM_ASSERT_LIST_INDEX_OFFSET);
599 if (last_idx)
600 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
601
602 /* print the asserts */
603 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
604
605 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
606 XSTORM_ASSERT_LIST_OFFSET(i));
607 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
608 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
609 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
610 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
611 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
612 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
613
614 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
615 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
616 " 0x%08x 0x%08x 0x%08x\n",
617 i, row3, row2, row1, row0);
618 rc++;
619 } else {
620 break;
621 }
622 }
623
624 /* TSTORM */
625 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
626 TSTORM_ASSERT_LIST_INDEX_OFFSET);
627 if (last_idx)
628 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
629
630 /* print the asserts */
631 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
632
633 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
634 TSTORM_ASSERT_LIST_OFFSET(i));
635 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
636 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
637 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
638 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
639 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
640 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
641
642 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
643 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
644 " 0x%08x 0x%08x 0x%08x\n",
645 i, row3, row2, row1, row0);
646 rc++;
647 } else {
648 break;
649 }
650 }
651
652 /* CSTORM */
653 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
654 CSTORM_ASSERT_LIST_INDEX_OFFSET);
655 if (last_idx)
656 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
657
658 /* print the asserts */
659 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
660
661 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
662 CSTORM_ASSERT_LIST_OFFSET(i));
663 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
664 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
665 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
666 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
667 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
668 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
669
670 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
671 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
672 " 0x%08x 0x%08x 0x%08x\n",
673 i, row3, row2, row1, row0);
674 rc++;
675 } else {
676 break;
677 }
678 }
679
680 /* USTORM */
681 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
682 USTORM_ASSERT_LIST_INDEX_OFFSET);
683 if (last_idx)
684 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
685
686 /* print the asserts */
687 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
688
689 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
690 USTORM_ASSERT_LIST_OFFSET(i));
691 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
692 USTORM_ASSERT_LIST_OFFSET(i) + 4);
693 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
694 USTORM_ASSERT_LIST_OFFSET(i) + 8);
695 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
696 USTORM_ASSERT_LIST_OFFSET(i) + 12);
697
698 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
699 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
700 " 0x%08x 0x%08x 0x%08x\n",
701 i, row3, row2, row1, row0);
702 rc++;
703 } else {
704 break;
705 }
706 }
707
708 return rc;
709}
710
711void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
712{
713 u32 addr, val;
714 u32 mark, offset;
715 __be32 data[9];
716 int word;
717 u32 trace_shmem_base;
718 if (BP_NOMCP(bp)) {
719 BNX2X_ERR("NO MCP - can not dump\n");
720 return;
721 }
722 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
723 (bp->common.bc_ver & 0xff0000) >> 16,
724 (bp->common.bc_ver & 0xff00) >> 8,
725 (bp->common.bc_ver & 0xff));
726
727 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
728 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
729 printk("%s" "MCP PC at 0x%x\n", lvl, val);
730
731 if (BP_PATH(bp) == 0)
732 trace_shmem_base = bp->common.shmem_base;
733 else
734 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
735 addr = trace_shmem_base - 0x0800 + 4;
736 mark = REG_RD(bp, addr);
737 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
738 + ((mark + 0x3) & ~0x3) - 0x08000000;
739 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
740
741 printk("%s", lvl);
742 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
743 for (word = 0; word < 8; word++)
744 data[word] = htonl(REG_RD(bp, offset + 4*word));
745 data[8] = 0x0;
746 pr_cont("%s", (char *)data);
747 }
748 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
749 for (word = 0; word < 8; word++)
750 data[word] = htonl(REG_RD(bp, offset + 4*word));
751 data[8] = 0x0;
752 pr_cont("%s", (char *)data);
753 }
754 printk("%s" "end of fw dump\n", lvl);
755}
756
757static inline void bnx2x_fw_dump(struct bnx2x *bp)
758{
759 bnx2x_fw_dump_lvl(bp, KERN_ERR);
760}
761
762void bnx2x_panic_dump(struct bnx2x *bp)
763{
764 int i;
765 u16 j;
766 struct hc_sp_status_block_data sp_sb_data;
767 int func = BP_FUNC(bp);
768#ifdef BNX2X_STOP_ON_ERROR
769 u16 start = 0, end = 0;
770 u8 cos;
771#endif
772
773 bp->stats_state = STATS_STATE_DISABLED;
774 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
775
776 BNX2X_ERR("begin crash dump -----------------\n");
777
778 /* Indices */
779 /* Common */
780 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
781 " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
782 bp->def_idx, bp->def_att_idx, bp->attn_state,
783 bp->spq_prod_idx, bp->stats_counter);
784 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
785 bp->def_status_blk->atten_status_block.attn_bits,
786 bp->def_status_blk->atten_status_block.attn_bits_ack,
787 bp->def_status_blk->atten_status_block.status_block_id,
788 bp->def_status_blk->atten_status_block.attn_bits_index);
789 BNX2X_ERR(" def (");
790 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
791 pr_cont("0x%x%s",
792 bp->def_status_blk->sp_sb.index_values[i],
793 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
794
795 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
796 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
797 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
798 i*sizeof(u32));
799
800 pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) "
801 "pf_id(0x%x) vnic_id(0x%x) "
802 "vf_id(0x%x) vf_valid (0x%x) "
803 "state(0x%x)\n",
804 sp_sb_data.igu_sb_id,
805 sp_sb_data.igu_seg_id,
806 sp_sb_data.p_func.pf_id,
807 sp_sb_data.p_func.vnic_id,
808 sp_sb_data.p_func.vf_id,
809 sp_sb_data.p_func.vf_valid,
810 sp_sb_data.state);
811
812
813 for_each_eth_queue(bp, i) {
814 struct bnx2x_fastpath *fp = &bp->fp[i];
815 int loop;
816 struct hc_status_block_data_e2 sb_data_e2;
817 struct hc_status_block_data_e1x sb_data_e1x;
818 struct hc_status_block_sm *hc_sm_p =
819 CHIP_IS_E1x(bp) ?
820 sb_data_e1x.common.state_machine :
821 sb_data_e2.common.state_machine;
822 struct hc_index_data *hc_index_p =
823 CHIP_IS_E1x(bp) ?
824 sb_data_e1x.index_data :
825 sb_data_e2.index_data;
826 u8 data_size, cos;
827 u32 *sb_data_p;
828 struct bnx2x_fp_txdata txdata;
829
830 /* Rx */
831 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
832 " rx_comp_prod(0x%x)"
833 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
834 i, fp->rx_bd_prod, fp->rx_bd_cons,
835 fp->rx_comp_prod,
836 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
837 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
838 " fp_hc_idx(0x%x)\n",
839 fp->rx_sge_prod, fp->last_max_sge,
840 le16_to_cpu(fp->fp_hc_idx));
841
842 /* Tx */
843 for_each_cos_in_tx_queue(fp, cos)
844 {
845 txdata = fp->txdata[cos];
846 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
847 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
848 " *tx_cons_sb(0x%x)\n",
849 i, txdata.tx_pkt_prod,
850 txdata.tx_pkt_cons, txdata.tx_bd_prod,
851 txdata.tx_bd_cons,
852 le16_to_cpu(*txdata.tx_cons_sb));
853 }
854
855 loop = CHIP_IS_E1x(bp) ?
856 HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
857
858 /* host sb data */
859
860#ifdef BCM_CNIC
861 if (IS_FCOE_FP(fp))
862 continue;
863#endif
864 BNX2X_ERR(" run indexes (");
865 for (j = 0; j < HC_SB_MAX_SM; j++)
866 pr_cont("0x%x%s",
867 fp->sb_running_index[j],
868 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
869
870 BNX2X_ERR(" indexes (");
871 for (j = 0; j < loop; j++)
872 pr_cont("0x%x%s",
873 fp->sb_index_values[j],
874 (j == loop - 1) ? ")" : " ");
875 /* fw sb data */
876 data_size = CHIP_IS_E1x(bp) ?
877 sizeof(struct hc_status_block_data_e1x) :
878 sizeof(struct hc_status_block_data_e2);
879 data_size /= sizeof(u32);
880 sb_data_p = CHIP_IS_E1x(bp) ?
881 (u32 *)&sb_data_e1x :
882 (u32 *)&sb_data_e2;
883 /* copy sb data in here */
884 for (j = 0; j < data_size; j++)
885 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
886 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
887 j * sizeof(u32));
888
889 if (!CHIP_IS_E1x(bp)) {
890 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
891 "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
892 "state(0x%x)\n",
893 sb_data_e2.common.p_func.pf_id,
894 sb_data_e2.common.p_func.vf_id,
895 sb_data_e2.common.p_func.vf_valid,
896 sb_data_e2.common.p_func.vnic_id,
897 sb_data_e2.common.same_igu_sb_1b,
898 sb_data_e2.common.state);
899 } else {
900 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
901 "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
902 "state(0x%x)\n",
903 sb_data_e1x.common.p_func.pf_id,
904 sb_data_e1x.common.p_func.vf_id,
905 sb_data_e1x.common.p_func.vf_valid,
906 sb_data_e1x.common.p_func.vnic_id,
907 sb_data_e1x.common.same_igu_sb_1b,
908 sb_data_e1x.common.state);
909 }
910
911 /* SB_SMs data */
912 for (j = 0; j < HC_SB_MAX_SM; j++) {
913 pr_cont("SM[%d] __flags (0x%x) "
914 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
915 "time_to_expire (0x%x) "
916 "timer_value(0x%x)\n", j,
917 hc_sm_p[j].__flags,
918 hc_sm_p[j].igu_sb_id,
919 hc_sm_p[j].igu_seg_id,
920 hc_sm_p[j].time_to_expire,
921 hc_sm_p[j].timer_value);
922 }
923
924		/* Indices data */
925 for (j = 0; j < loop; j++) {
926 pr_cont("INDEX[%d] flags (0x%x) "
927 "timeout (0x%x)\n", j,
928 hc_index_p[j].flags,
929 hc_index_p[j].timeout);
930 }
931 }
932
933#ifdef BNX2X_STOP_ON_ERROR
934 /* Rings */
935 /* Rx */
936 for_each_rx_queue(bp, i) {
937 struct bnx2x_fastpath *fp = &bp->fp[i];
938
939 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
940 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
941 for (j = start; j != end; j = RX_BD(j + 1)) {
942 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
943 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
944
945 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
946 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
947 }
948
949 start = RX_SGE(fp->rx_sge_prod);
950 end = RX_SGE(fp->last_max_sge);
951 for (j = start; j != end; j = RX_SGE(j + 1)) {
952 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
953 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
954
955 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
956 i, j, rx_sge[1], rx_sge[0], sw_page->page);
957 }
958
959 start = RCQ_BD(fp->rx_comp_cons - 10);
960 end = RCQ_BD(fp->rx_comp_cons + 503);
961 for (j = start; j != end; j = RCQ_BD(j + 1)) {
962 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
963
964 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
965 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
966 }
967 }
968
969 /* Tx */
970 for_each_tx_queue(bp, i) {
971 struct bnx2x_fastpath *fp = &bp->fp[i];
972 for_each_cos_in_tx_queue(fp, cos) {
973 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
974
975 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
976 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
977 for (j = start; j != end; j = TX_BD(j + 1)) {
978 struct sw_tx_bd *sw_bd =
979 &txdata->tx_buf_ring[j];
980
981 BNX2X_ERR("fp%d: txdata %d, "
982 "packet[%x]=[%p,%x]\n",
983 i, cos, j, sw_bd->skb,
984 sw_bd->first_bd);
985 }
986
987 start = TX_BD(txdata->tx_bd_cons - 10);
988 end = TX_BD(txdata->tx_bd_cons + 254);
989 for (j = start; j != end; j = TX_BD(j + 1)) {
990 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
991
992 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]="
993 "[%x:%x:%x:%x]\n",
994 i, cos, j, tx_bd[0], tx_bd[1],
995 tx_bd[2], tx_bd[3]);
996 }
997 }
998 }
999#endif
1000 bnx2x_fw_dump(bp);
1001 bnx2x_mc_assert(bp);
1002 BNX2X_ERR("end crash dump -----------------\n");
1003}
1004
1005/*
1006 * FLR Support for E2
1007 *
1008 * bnx2x_pf_flr_clnup() is called during nic_load in the per-function HW
1009 * initialization.
1010 */
1011#define FLR_WAIT_USEC			10000	/* 10 milliseconds */
1012#define FLR_WAIT_INTERAVAL 50 /* usec */
1013#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */
1014
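/*
 * Hedged arithmetic check: FLR_POLL_CNT = 10000 / 50 = 200 polls of
 * 50 usec each, i.e. a 10 ms budget on real silicon. As
 * bnx2x_flr_clnup_poll_count() below shows, the count is scaled by
 * 2000x on emulation and 120x on FPGA.
 */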
1015struct pbf_pN_buf_regs {
1016 int pN;
1017 u32 init_crd;
1018 u32 crd;
1019 u32 crd_freed;
1020};
1021
1022struct pbf_pN_cmd_regs {
1023 int pN;
1024 u32 lines_occup;
1025 u32 lines_freed;
1026};
1027
1028static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1029 struct pbf_pN_buf_regs *regs,
1030 u32 poll_count)
1031{
1032 u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
1033 u32 cur_cnt = poll_count;
1034
1035 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
1036 crd = crd_start = REG_RD(bp, regs->crd);
1037 init_crd = REG_RD(bp, regs->init_crd);
1038
1039 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
1040 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
1041 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
1042
1043 while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
1044 (init_crd - crd_start))) {
1045 if (cur_cnt--) {
1046 udelay(FLR_WAIT_INTERAVAL);
1047 crd = REG_RD(bp, regs->crd);
1048 crd_freed = REG_RD(bp, regs->crd_freed);
1049 } else {
1050 DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
1051 regs->pN);
1052 DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
1053 regs->pN, crd);
1054 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
1055 regs->pN, crd_freed);
1056 break;
1057 }
1058 }
1059 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
1060 poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
1061}
1062
1063static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1064 struct pbf_pN_cmd_regs *regs,
1065 u32 poll_count)
1066{
1067 u32 occup, to_free, freed, freed_start;
1068 u32 cur_cnt = poll_count;
1069
1070 occup = to_free = REG_RD(bp, regs->lines_occup);
1071 freed = freed_start = REG_RD(bp, regs->lines_freed);
1072
1073 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
1074 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
1075
1076 while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1077 if (cur_cnt--) {
1078 udelay(FLR_WAIT_INTERAVAL);
1079 occup = REG_RD(bp, regs->lines_occup);
1080 freed = REG_RD(bp, regs->lines_freed);
1081 } else {
1082 DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
1083 regs->pN);
1084 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
1085 regs->pN, occup);
1086 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
1087 regs->pN, freed);
1088 break;
1089 }
1090 }
1091 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
1092 poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
1093}
1094
1095static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1096 u32 expected, u32 poll_count)
1097{
1098 u32 cur_cnt = poll_count;
1099 u32 val;
1100
1101 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1102 udelay(FLR_WAIT_INTERAVAL);
1103
1104 return val;
1105}
1106
1107static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1108 char *msg, u32 poll_cnt)
1109{
1110 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1111 if (val != 0) {
1112 BNX2X_ERR("%s usage count=%d\n", msg, val);
1113 return 1;
1114 }
1115 return 0;
1116}
1117
1118static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1119{
1120 /* adjust polling timeout */
1121 if (CHIP_REV_IS_EMUL(bp))
1122 return FLR_POLL_CNT * 2000;
1123
1124 if (CHIP_REV_IS_FPGA(bp))
1125 return FLR_POLL_CNT * 120;
1126
1127 return FLR_POLL_CNT;
1128}
1129
1130static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1131{
1132 struct pbf_pN_cmd_regs cmd_regs[] = {
1133 {0, (CHIP_IS_E3B0(bp)) ?
1134 PBF_REG_TQ_OCCUPANCY_Q0 :
1135 PBF_REG_P0_TQ_OCCUPANCY,
1136 (CHIP_IS_E3B0(bp)) ?
1137 PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1138 PBF_REG_P0_TQ_LINES_FREED_CNT},
1139 {1, (CHIP_IS_E3B0(bp)) ?
1140 PBF_REG_TQ_OCCUPANCY_Q1 :
1141 PBF_REG_P1_TQ_OCCUPANCY,
1142 (CHIP_IS_E3B0(bp)) ?
1143 PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1144 PBF_REG_P1_TQ_LINES_FREED_CNT},
1145 {4, (CHIP_IS_E3B0(bp)) ?
1146 PBF_REG_TQ_OCCUPANCY_LB_Q :
1147 PBF_REG_P4_TQ_OCCUPANCY,
1148 (CHIP_IS_E3B0(bp)) ?
1149 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1150 PBF_REG_P4_TQ_LINES_FREED_CNT}
1151 };
1152
1153 struct pbf_pN_buf_regs buf_regs[] = {
1154 {0, (CHIP_IS_E3B0(bp)) ?
1155 PBF_REG_INIT_CRD_Q0 :
1156 PBF_REG_P0_INIT_CRD ,
1157 (CHIP_IS_E3B0(bp)) ?
1158 PBF_REG_CREDIT_Q0 :
1159 PBF_REG_P0_CREDIT,
1160 (CHIP_IS_E3B0(bp)) ?
1161 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1162 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1163 {1, (CHIP_IS_E3B0(bp)) ?
1164 PBF_REG_INIT_CRD_Q1 :
1165 PBF_REG_P1_INIT_CRD,
1166 (CHIP_IS_E3B0(bp)) ?
1167 PBF_REG_CREDIT_Q1 :
1168 PBF_REG_P1_CREDIT,
1169 (CHIP_IS_E3B0(bp)) ?
1170 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1171 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1172 {4, (CHIP_IS_E3B0(bp)) ?
1173 PBF_REG_INIT_CRD_LB_Q :
1174 PBF_REG_P4_INIT_CRD,
1175 (CHIP_IS_E3B0(bp)) ?
1176 PBF_REG_CREDIT_LB_Q :
1177 PBF_REG_P4_CREDIT,
1178 (CHIP_IS_E3B0(bp)) ?
1179 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1180 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1181 };
1182
1183 int i;
1184
1185 /* Verify the command queues are flushed P0, P1, P4 */
1186 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1187 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1188
1189
1190 /* Verify the transmission buffers are flushed P0, P1, P4 */
1191 for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1192 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1193}
1194
1195#define OP_GEN_PARAM(param) \
1196 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1197
1198#define OP_GEN_TYPE(type) \
1199 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1200
1201#define OP_GEN_AGG_VECT(index) \
1202 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1203
1204
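/*
 * Hedged illustration of how the field macros above compose the final
 * cleanup command used in bnx2x_send_final_clnup() below (the
 * clnup_func value of 3 is illustrative):
 *
 *	cmd = OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX) |
 *	      OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE) |
 *	      OP_GEN_AGG_VECT(3) |
 *	      1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
 *
 * Each macro shifts its argument into its SDM_OP_GEN_* field and masks
 * it so that neighbouring fields cannot be corrupted.
 */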
1205static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
1206 u32 poll_cnt)
1207{
1208 struct sdm_op_gen op_gen = {0};
1209
1210 u32 comp_addr = BAR_CSTRORM_INTMEM +
1211 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1212 int ret = 0;
1213
1214 if (REG_RD(bp, comp_addr)) {
1215 BNX2X_ERR("Cleanup complete is not 0\n");
1216 return 1;
1217 }
1218
1219 op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1220 op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1221 op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
1222 op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1223
1224 DP(BNX2X_MSG_SP, "FW Final cleanup\n");
1225 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
1226
1227 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1228 BNX2X_ERR("FW final cleanup did not succeed\n");
1229 ret = 1;
1230 }
1231	/* Zero completion for next FLR */
1232 REG_WR(bp, comp_addr, 0);
1233
1234 return ret;
1235}
1236
1237static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1238{
1239 int pos;
1240 u16 status;
1241
1242 pos = pci_pcie_cap(dev);
1243 if (!pos)
1244 return false;
1245
1246 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
1247 return status & PCI_EXP_DEVSTA_TRPND;
1248}
1249
1250/* PF FLR specific routines */
1251
1252static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1253{
1254
1255 /* wait for CFC PF usage-counter to zero (includes all the VFs) */
1256 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1257 CFC_REG_NUM_LCIDS_INSIDE_PF,
1258 "CFC PF usage counter timed out",
1259 poll_cnt))
1260 return 1;
1261
1262
1263 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
1264 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1265 DORQ_REG_PF_USAGE_CNT,
1266 "DQ PF usage counter timed out",
1267 poll_cnt))
1268 return 1;
1269
1270 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
1271 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1272 QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1273 "QM PF usage counter timed out",
1274 poll_cnt))
1275 return 1;
1276
1277 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
1278 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1279 TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1280 "Timers VNIC usage counter timed out",
1281 poll_cnt))
1282 return 1;
1283 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1284 TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1285 "Timers NUM_SCANS usage counter timed out",
1286 poll_cnt))
1287 return 1;
1288
1289	/* Wait for the DMAE PF usage counter to zero */
1290	if (bnx2x_flr_clnup_poll_hw_counter(bp,
1291			dmae_reg_go_c[INIT_DMAE_C(bp)],
1292			"DMAE command register timed out",
1293 poll_cnt))
1294 return 1;
1295
1296 return 0;
1297}
1298
1299static void bnx2x_hw_enable_status(struct bnx2x *bp)
1300{
1301 u32 val;
1302
1303 val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1304 DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1305
1306 val = REG_RD(bp, PBF_REG_DISABLE_PF);
1307 DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1308
1309 val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1310 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1311
1312 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1313 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1314
1315 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1316 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1317
1318 val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1319 DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1320
1321 val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1322 DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1323
1324 val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1325 DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1326 val);
1327}
1328
1329static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1330{
1331 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1332
1333 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1334
1335 /* Re-enable PF target read access */
1336 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1337
1338 /* Poll HW usage counters */
1339 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1340 return -EBUSY;
1341
1342 /* Zero the igu 'trailing edge' and 'leading edge' */
1343
1344 /* Send the FW cleanup command */
1345 if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1346 return -EBUSY;
1347
1348 /* ATC cleanup */
1349
1350 /* Verify TX hw is flushed */
1351 bnx2x_tx_hw_flushed(bp, poll_cnt);
1352
1353 /* Wait 100ms (not adjusted according to platform) */
1354 msleep(100);
1355
1356 /* Verify no pending pci transactions */
1357 if (bnx2x_is_pcie_pending(bp->pdev))
1358 BNX2X_ERR("PCIE Transactions still pending\n");
1359
1360 /* Debug */
1361 bnx2x_hw_enable_status(bp);
1362
1363 /*
1364 * Master enable - Due to WB DMAE writes performed before this
1365 * register is re-initialized as part of the regular function init
1366 */
1367 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1368
1369 return 0;
1370}
1371
1372static void bnx2x_hc_int_enable(struct bnx2x *bp)
1373{
1374 int port = BP_PORT(bp);
1375 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1376 u32 val = REG_RD(bp, addr);
1377 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1378 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1379
1380 if (msix) {
1381 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1382 HC_CONFIG_0_REG_INT_LINE_EN_0);
1383 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1384 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1385 } else if (msi) {
1386 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1387 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1388 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1389 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1390 } else {
1391 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1392 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1393 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1394 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1395
1396 if (!CHIP_IS_E1(bp)) {
1397 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1398 val, port, addr);
1399
1400 REG_WR(bp, addr, val);
1401
1402 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1403 }
1404 }
1405
1406 if (CHIP_IS_E1(bp))
1407 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1408
1409 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
1410 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1411
1412 REG_WR(bp, addr, val);
1413 /*
1414 * Ensure that HC_CONFIG is written before leading/trailing edge config
1415 */
1416 mmiowb();
1417 barrier();
1418
1419 if (!CHIP_IS_E1(bp)) {
1420 /* init leading/trailing edge */
1421 if (IS_MF(bp)) {
1422 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1423 if (bp->port.pmf)
1424 /* enable nig and gpio3 attention */
1425 val |= 0x1100;
1426 } else
1427 val = 0xffff;
1428
1429 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1430 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1431 }
1432
1433 /* Make sure that interrupts are indeed enabled from here on */
1434 mmiowb();
1435}
1436
1437static void bnx2x_igu_int_enable(struct bnx2x *bp)
1438{
1439 u32 val;
1440 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1441 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1442
1443 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1444
1445 if (msix) {
1446 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1447 IGU_PF_CONF_SINGLE_ISR_EN);
1448 val |= (IGU_PF_CONF_FUNC_EN |
1449 IGU_PF_CONF_MSI_MSIX_EN |
1450 IGU_PF_CONF_ATTN_BIT_EN);
1451 } else if (msi) {
1452 val &= ~IGU_PF_CONF_INT_LINE_EN;
1453 val |= (IGU_PF_CONF_FUNC_EN |
1454 IGU_PF_CONF_MSI_MSIX_EN |
1455 IGU_PF_CONF_ATTN_BIT_EN |
1456 IGU_PF_CONF_SINGLE_ISR_EN);
1457 } else {
1458 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1459 val |= (IGU_PF_CONF_FUNC_EN |
1460 IGU_PF_CONF_INT_LINE_EN |
1461 IGU_PF_CONF_ATTN_BIT_EN |
1462 IGU_PF_CONF_SINGLE_ISR_EN);
1463 }
1464
1465 DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
1466 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1467
1468 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1469
1470 barrier();
1471
1472 /* init leading/trailing edge */
1473 if (IS_MF(bp)) {
1474 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1475 if (bp->port.pmf)
1476 /* enable nig and gpio3 attention */
1477 val |= 0x1100;
1478 } else
1479 val = 0xffff;
1480
1481 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1482 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1483
1484 /* Make sure that interrupts are indeed enabled from here on */
1485 mmiowb();
1486}
1487
1488void bnx2x_int_enable(struct bnx2x *bp)
1489{
1490 if (bp->common.int_block == INT_BLOCK_HC)
1491 bnx2x_hc_int_enable(bp);
1492 else
1493 bnx2x_igu_int_enable(bp);
1494}
1495
1496static void bnx2x_hc_int_disable(struct bnx2x *bp)
1497{
1498 int port = BP_PORT(bp);
1499 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1500 u32 val = REG_RD(bp, addr);
1501
1502	/*
1503	 * In E1 we must use only the PCI configuration space to disable the
1504	 * MSI/MSI-X capability.
1505	 * It is forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
1506	 */
1507 if (CHIP_IS_E1(bp)) {
1508		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
1509		 * use the mask register to prevent the HC from sending
1510		 * interrupts after we exit the function
1511		 */
1512 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
1513
1514 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1515 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1516 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1517 } else
1518 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1519 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1520 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1521 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1522
1523 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1524 val, port, addr);
1525
1526 /* flush all outstanding writes */
1527 mmiowb();
1528
1529 REG_WR(bp, addr, val);
1530 if (REG_RD(bp, addr) != val)
1531 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1532}
1533
1534static void bnx2x_igu_int_disable(struct bnx2x *bp)
1535{
1536 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1537
1538 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1539 IGU_PF_CONF_INT_LINE_EN |
1540 IGU_PF_CONF_ATTN_BIT_EN);
1541
1542 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1543
1544 /* flush all outstanding writes */
1545 mmiowb();
1546
1547 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1548 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1549 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1550}
1551
1552void bnx2x_int_disable(struct bnx2x *bp)
1553{
1554 if (bp->common.int_block == INT_BLOCK_HC)
1555 bnx2x_hc_int_disable(bp);
1556 else
1557 bnx2x_igu_int_disable(bp);
1558}
1559
1560void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1561{
1562 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1563 int i, offset;
1564
1565 if (disable_hw)
1566 /* prevent the HW from sending interrupts */
1567 bnx2x_int_disable(bp);
1568
1569 /* make sure all ISRs are done */
1570 if (msix) {
1571 synchronize_irq(bp->msix_table[0].vector);
1572 offset = 1;
1573#ifdef BCM_CNIC
1574 offset++;
1575#endif
1576 for_each_eth_queue(bp, i)
1577 synchronize_irq(bp->msix_table[offset++].vector);
1578 } else
1579 synchronize_irq(bp->pdev->irq);
1580
1581 /* make sure sp_task is not running */
1582 cancel_delayed_work(&bp->sp_task);
1583 cancel_delayed_work(&bp->period_task);
1584 flush_workqueue(bnx2x_wq);
1585}
1586
1587/* fast path */
1588
1589/*
1590 * General service functions
1591 */
1592
1593/* Return true if succeeded to acquire the lock */
1594static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1595{
1596 u32 lock_status;
1597 u32 resource_bit = (1 << resource);
1598 int func = BP_FUNC(bp);
1599 u32 hw_lock_control_reg;
1600
1601 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1602
1603 /* Validating that the resource is within range */
1604 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1605 DP(NETIF_MSG_HW,
1606 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1607 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1608 return false;
1609 }
1610
1611 if (func <= 5)
1612 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1613 else
1614 hw_lock_control_reg =
1615 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1616
1617 /* Try to acquire the lock */
1618 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1619 lock_status = REG_RD(bp, hw_lock_control_reg);
1620 if (lock_status & resource_bit)
1621 return true;
1622
1623 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1624 return false;
1625}
1626
1627/**
1628 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1629 *
1630 * @bp: driver handle
1631 *
1632 * Returns the recovery leader resource id according to the engine this
1633 * function belongs to. Currently only 2 engines are supported.
1634 */
1635static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1636{
1637 if (BP_PATH(bp))
1638 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1639 else
1640 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1641}
1642
1643/**
1644 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
1645 *
1646 * @bp: driver handle
1647 *
1648 * Tries to acquire a leader lock for the current engine.
1649 */
1650static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1651{
1652 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1653}
1654
1655#ifdef BCM_CNIC
1656static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1657#endif
1658
1659void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1660{
1661 struct bnx2x *bp = fp->bp;
1662 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1663 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1664 enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1665 struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
1666
1667 DP(BNX2X_MSG_SP,
1668 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1669 fp->index, cid, command, bp->state,
1670 rr_cqe->ramrod_cqe.ramrod_type);
1671
1672 switch (command) {
1673 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1674 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1675 drv_cmd = BNX2X_Q_CMD_UPDATE;
1676 break;
1677
1678 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1679 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1680 drv_cmd = BNX2X_Q_CMD_SETUP;
1681 break;
1682
1683 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1684 DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1685 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1686 break;
1687
1688 case (RAMROD_CMD_ID_ETH_HALT):
1689 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1690 drv_cmd = BNX2X_Q_CMD_HALT;
1691 break;
1692
1693 case (RAMROD_CMD_ID_ETH_TERMINATE):
1694 DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid);
1695 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1696 break;
1697
1698 case (RAMROD_CMD_ID_ETH_EMPTY):
1699 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1700 drv_cmd = BNX2X_Q_CMD_EMPTY;
1701 break;
1702
1703 default:
1704 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1705 command, fp->index);
1706 return;
1707 }
1708
1709 if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1710 q_obj->complete_cmd(bp, q_obj, drv_cmd))
1711 /* q_obj->complete_cmd() failure means that this was
1712 * an unexpected completion.
1713 *
1714 * In this case we don't want to increase the bp->spq_left
1715		 * because apparently we haven't sent this command in the first
1716 * place.
1717 */
1718#ifdef BNX2X_STOP_ON_ERROR
1719 bnx2x_panic();
1720#else
1721 return;
1722#endif
1723
1724 smp_mb__before_atomic_inc();
1725 atomic_inc(&bp->cq_spq_left);
1726 /* push the change in bp->spq_left and towards the memory */
1727 smp_mb__after_atomic_inc();
1728
1729 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1730
1731 return;
1732}
1733
1734void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1735 u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
1736{
1737 u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
1738
1739 bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
1740 start);
1741}
1742
1743irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1744{
1745 struct bnx2x *bp = netdev_priv(dev_instance);
1746 u16 status = bnx2x_ack_int(bp);
1747 u16 mask;
1748 int i;
1749 u8 cos;
1750
1751 /* Return here if interrupt is shared and it's not for us */
1752 if (unlikely(status == 0)) {
1753 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1754 return IRQ_NONE;
1755 }
1756 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1757
1758#ifdef BNX2X_STOP_ON_ERROR
1759 if (unlikely(bp->panic))
1760 return IRQ_HANDLED;
1761#endif
1762
1763 for_each_eth_queue(bp, i) {
1764 struct bnx2x_fastpath *fp = &bp->fp[i];
1765
1766 mask = 0x2 << (fp->index + CNIC_PRESENT);
1767 if (status & mask) {
1768 /* Handle Rx or Tx according to SB id */
1769 prefetch(fp->rx_cons_sb);
1770 for_each_cos_in_tx_queue(fp, cos)
1771 prefetch(fp->txdata[cos].tx_cons_sb);
1772 prefetch(&fp->sb_running_index[SM_RX_ID]);
1773 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1774 status &= ~mask;
1775 }
1776 }
1777
1778#ifdef BCM_CNIC
1779 mask = 0x2;
1780 if (status & (mask | 0x1)) {
1781 struct cnic_ops *c_ops = NULL;
1782
1783 if (likely(bp->state == BNX2X_STATE_OPEN)) {
1784 rcu_read_lock();
1785 c_ops = rcu_dereference(bp->cnic_ops);
1786 if (c_ops)
1787 c_ops->cnic_handler(bp->cnic_data, NULL);
1788 rcu_read_unlock();
1789 }
1790
1791 status &= ~mask;
1792 }
1793#endif
1794
1795 if (unlikely(status & 0x1)) {
1796 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1797
1798 status &= ~0x1;
1799 if (!status)
1800 return IRQ_HANDLED;
1801 }
1802
1803 if (unlikely(status))
1804 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1805 status);
1806
1807 return IRQ_HANDLED;
1808}
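/*
 * Hedged summary of the status-word layout handled above: bit 0 is the
 * slowpath event (handed to sp_task), bit 1 is the CNIC SB when
 * present, and eth queue i owns bit (i + CNIC_PRESENT + 1), which is
 * exactly the 0x2 << (fp->index + CNIC_PRESENT) mask computed in the
 * loop.
 */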
1809
1810/* Link */
1811
1812/*
1813 * General service functions
1814 */
1815
1816int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1817{
1818 u32 lock_status;
1819 u32 resource_bit = (1 << resource);
1820 int func = BP_FUNC(bp);
1821 u32 hw_lock_control_reg;
1822 int cnt;
1823
1824 /* Validating that the resource is within range */
1825 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1826 DP(NETIF_MSG_HW,
1827 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1828 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1829 return -EINVAL;
1830 }
1831
1832 if (func <= 5) {
1833 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1834 } else {
1835 hw_lock_control_reg =
1836 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1837 }
1838
1839 /* Validating that the resource is not already taken */
1840 lock_status = REG_RD(bp, hw_lock_control_reg);
1841 if (lock_status & resource_bit) {
1842 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1843 lock_status, resource_bit);
1844 return -EEXIST;
1845 }
1846
1847	/* Try for 5 seconds, polling every 5 ms */
1848 for (cnt = 0; cnt < 1000; cnt++) {
1849 /* Try to acquire the lock */
1850 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1851 lock_status = REG_RD(bp, hw_lock_control_reg);
1852 if (lock_status & resource_bit)
1853 return 0;
1854
1855 msleep(5);
1856 }
1857 DP(NETIF_MSG_HW, "Timeout\n");
1858 return -EAGAIN;
1859}
1860
1861int bnx2x_release_leader_lock(struct bnx2x *bp)
1862{
1863 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1864}
1865
1866int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1867{
1868 u32 lock_status;
1869 u32 resource_bit = (1 << resource);
1870 int func = BP_FUNC(bp);
1871 u32 hw_lock_control_reg;
1872
1873 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1874
1875 /* Validating that the resource is within range */
1876 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1877 DP(NETIF_MSG_HW,
1878 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1879 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1880 return -EINVAL;
1881 }
1882
1883 if (func <= 5) {
1884 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1885 } else {
1886 hw_lock_control_reg =
1887 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1888 }
1889
1890 /* Validating that the resource is currently taken */
1891 lock_status = REG_RD(bp, hw_lock_control_reg);
1892 if (!(lock_status & resource_bit)) {
1893 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1894 lock_status, resource_bit);
1895 return -EFAULT;
1896 }
1897
1898 REG_WR(bp, hw_lock_control_reg, resource_bit);
1899 return 0;
1900}
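/*
 * Hedged usage sketch (editor's example, not part of the driver): the
 * protocol above is acquire -> use -> release, with any non-zero
 * return (-EINVAL, -EEXIST, -EAGAIN) treated as failure.
 */
static int __maybe_unused bnx2x_hw_lock_example(struct bnx2x *bp)
{
	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	if (rc)
		return rc;
	/* ... touch the resource guarded by the lock ... */
	return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
}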
1901
1902
1903int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1904{
1905 /* The GPIO should be swapped if swap register is set and active */
1906 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1907 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1908 int gpio_shift = gpio_num +
1909 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1910 u32 gpio_mask = (1 << gpio_shift);
1911 u32 gpio_reg;
1912 int value;
1913
1914 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1915 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1916 return -EINVAL;
1917 }
1918
1919 /* read GPIO value */
1920 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1921
1922 /* get the requested pin value */
1923 if ((gpio_reg & gpio_mask) == gpio_mask)
1924 value = 1;
1925 else
1926 value = 0;
1927
1928 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1929
1930 return value;
1931}
1932
1933int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1934{
1935 /* The GPIO should be swapped if swap register is set and active */
1936 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1937 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1938 int gpio_shift = gpio_num +
1939 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1940 u32 gpio_mask = (1 << gpio_shift);
1941 u32 gpio_reg;
1942
1943 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1944 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1945 return -EINVAL;
1946 }
1947
1948 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1949 /* read GPIO and mask except the float bits */
1950 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1951
1952 switch (mode) {
1953 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1954 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1955 gpio_num, gpio_shift);
1956 /* clear FLOAT and set CLR */
1957 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1958 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1959 break;
1960
1961 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1962 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1963 gpio_num, gpio_shift);
1964 /* clear FLOAT and set SET */
1965 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1966 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1967 break;
1968
1969 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1970 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1971 gpio_num, gpio_shift);
1972 /* set FLOAT */
1973 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1974 break;
1975
1976 default:
1977 break;
1978 }
1979
1980 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1981 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1982
1983 return 0;
1984}
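/*
 * Hedged usage sketch (the pin id is illustrative; MISC_REGISTERS_GPIO_0
 * is assumed to exist alongside the ..._GPIO_3 bound checked above):
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 *	...
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
 *		       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
 */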
1985
1986int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
1987{
1988 u32 gpio_reg = 0;
1989 int rc = 0;
1990
1991 /* Any port swapping should be handled by caller. */
1992
1993 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1994 /* read GPIO and mask except the float bits */
1995 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1996 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
1997 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
1998 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
1999
2000 switch (mode) {
2001 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2002 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2003 /* set CLR */
2004 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2005 break;
2006
2007 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2008 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2009 /* set SET */
2010 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2011 break;
2012
2013 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2014 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2015 /* set FLOAT */
2016 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2017 break;
2018
2019 default:
2020 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2021 rc = -EINVAL;
2022 break;
2023 }
2024
2025 if (rc == 0)
2026 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2027
2028 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2029
2030 return rc;
2031}
2032
2033int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2034{
2035 /* The GPIO should be swapped if swap register is set and active */
2036 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2037 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2038 int gpio_shift = gpio_num +
2039 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2040 u32 gpio_mask = (1 << gpio_shift);
2041 u32 gpio_reg;
2042
2043 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2044 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2045 return -EINVAL;
2046 }
2047
2048 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2049 /* read GPIO int */
2050 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2051
2052 switch (mode) {
2053 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2054 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2055 "output low\n", gpio_num, gpio_shift);
2056 /* clear SET and set CLR */
2057 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2058 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2059 break;
2060
2061 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2062 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2063 "output high\n", gpio_num, gpio_shift);
2064 /* clear CLR and set SET */
2065 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2066 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2067 break;
2068
2069 default:
2070 break;
2071 }
2072
2073 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2074 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2075
2076 return 0;
2077}
2078
2079static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2080{
2081 u32 spio_mask = (1 << spio_num);
2082 u32 spio_reg;
2083
2084 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2085 (spio_num > MISC_REGISTERS_SPIO_7)) {
2086 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2087 return -EINVAL;
2088 }
2089
2090 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2091 /* read SPIO and mask except the float bits */
2092 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2093
2094 switch (mode) {
2095 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2096 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2097 /* clear FLOAT and set CLR */
2098 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2099 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2100 break;
2101
2102 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2103 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2104 /* clear FLOAT and set SET */
2105 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2106 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2107 break;
2108
2109 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2110 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2111 /* set FLOAT */
2112 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2113 break;
2114
2115 default:
2116 break;
2117 }
2118
2119 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2120 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2121
2122 return 0;
2123}
2124
2125void bnx2x_calc_fc_adv(struct bnx2x *bp)
2126{
2127 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2128 switch (bp->link_vars.ieee_fc &
2129 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2130 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2131 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2132 ADVERTISED_Pause);
2133 break;
2134
2135 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2136 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2137 ADVERTISED_Pause);
2138 break;
2139
2140 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2141 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2142 break;
2143
2144 default:
2145 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2146 ADVERTISED_Pause);
2147 break;
2148 }
2149}
2150
2151u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2152{
2153 if (!BP_NOMCP(bp)) {
2154 u8 rc;
2155 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
2156 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2157 /*
2158 * Initialize link parameters structure variables
2159 * It is recommended to turn off RX FC for jumbo frames
2160 * for better performance
2161 */
2162 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2163 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2164 else
2165 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2166
2167 bnx2x_acquire_phy_lock(bp);
2168
2169 if (load_mode == LOAD_DIAG) {
2170 struct link_params *lp = &bp->link_params;
2171 lp->loopback_mode = LOOPBACK_XGXS;
2172 /* do PHY loopback at 10G speed, if possible */
2173 if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
2174 if (lp->speed_cap_mask[cfx_idx] &
2175 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2176 lp->req_line_speed[cfx_idx] =
2177 SPEED_10000;
2178 else
2179 lp->req_line_speed[cfx_idx] =
2180 SPEED_1000;
2181 }
2182 }
2183
2184 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2185
2186 bnx2x_release_phy_lock(bp);
2187
2188 bnx2x_calc_fc_adv(bp);
2189
2190 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2191 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2192 bnx2x_link_report(bp);
2193 } else
2194 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2195 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2196 return rc;
2197 }
2198 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2199 return -EINVAL;
2200}
2201
2202void bnx2x_link_set(struct bnx2x *bp)
2203{
2204 if (!BP_NOMCP(bp)) {
2205 bnx2x_acquire_phy_lock(bp);
2206 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2207 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2208 bnx2x_release_phy_lock(bp);
2209
2210 bnx2x_calc_fc_adv(bp);
2211 } else
2212 BNX2X_ERR("Bootcode is missing - can not set link\n");
2213}
2214
2215static void bnx2x__link_reset(struct bnx2x *bp)
2216{
2217 if (!BP_NOMCP(bp)) {
2218 bnx2x_acquire_phy_lock(bp);
2219 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2220 bnx2x_release_phy_lock(bp);
2221 } else
2222 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2223}
2224
2225u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2226{
2227 u8 rc = 0;
2228
2229 if (!BP_NOMCP(bp)) {
2230 bnx2x_acquire_phy_lock(bp);
2231 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2232 is_serdes);
2233 bnx2x_release_phy_lock(bp);
2234 } else
2235 BNX2X_ERR("Bootcode is missing - can not test link\n");
2236
2237 return rc;
2238}
2239
2240static void bnx2x_init_port_minmax(struct bnx2x *bp)
2241{
2242 u32 r_param = bp->link_vars.line_speed / 8;
2243 u32 fair_periodic_timeout_usec;
2244 u32 t_fair;
2245
2246 memset(&(bp->cmng.rs_vars), 0,
2247 sizeof(struct rate_shaping_vars_per_port));
2248 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2249
2250 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2251 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2252
2253	/* this is the threshold below which no timer arming will occur;
2254	   the 1.25 coefficient makes the threshold a little bigger than
2255	   the real time, to compensate for timer inaccuracy */
2256 bp->cmng.rs_vars.rs_threshold =
2257 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2258
2259 /* resolution of fairness timer */
2260 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2261	/* for 10G it is 1000 usec; for 1G it is 10000 usec */
2262 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2263
2264 /* this is the threshold below which we won't arm the timer anymore */
2265 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2266
2267	/* we multiply by 1e3/8 to get bytes/msec.
2268	   We don't want the credits to exceed a credit of
2269	   t_fair*FAIR_MEM (the algorithm's resolution) */
2270 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2271 /* since each tick is 4 usec */
2272 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2273}
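/*
 * Hedged worked numbers for a 10G link (line_speed = 10000 Mbps),
 * assuming RS_PERIODIC_TIMEOUT_USEC == 100 and T_FAIR_COEF == 10^7 as
 * the comments above imply:
 *
 *	r_param      = 10000 / 8        = 1250 bytes/usec
 *	rs_threshold = 100 * 1250 * 5/4 = 156250 bytes
 *	t_fair       = 10^7 / 10000     = 1000 usec
 */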
2274
2275/* Calculates the sum of vn_min_rates.
2276   It's needed for further normalizing of the min_rates.
2277   Returns:
2278     sum of vn_min_rates.
2279       or
2280     0 - if all the min_rates are 0.
2281   In the latter case the fairness algorithm should be deactivated.
2282   If not all min_rates are zero then those that are zeroes will be set to 1.
2283 */
2284static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2285{
2286 int all_zero = 1;
2287 int vn;
2288
2289 bp->vn_weight_sum = 0;
2290 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2291 u32 vn_cfg = bp->mf_config[vn];
2292 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2293 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2294
2295 /* Skip hidden vns */
2296 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2297 continue;
2298
2299 /* If min rate is zero - set it to 1 */
2300 if (!vn_min_rate)
2301 vn_min_rate = DEF_MIN_RATE;
2302 else
2303 all_zero = 0;
2304
2305 bp->vn_weight_sum += vn_min_rate;
2306 }
2307
2308 /* if ETS or all min rates are zeros - disable fairness */
2309 if (BNX2X_IS_ETS_ENABLED(bp)) {
2310 bp->cmng.flags.cmng_enables &=
2311 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2312 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2313 } else if (all_zero) {
2314 bp->cmng.flags.cmng_enables &=
2315 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2316 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2317 " fairness will be disabled\n");
2318 } else
2319 bp->cmng.flags.cmng_enables |=
2320 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2321}
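/*
 * Hedged example: with four visible VNs configured MIN_BW = {0, 25,
 * 0, 40}, vn_min_rate becomes {DEF_MIN_RATE, 2500, DEF_MIN_RATE,
 * 4000} (the MIN_BW field is scaled by 100 above), all_zero stays 0,
 * and fairness remains enabled; only the all-zero case, or ETS,
 * disables it.
 */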
2322
2323/* returns func by VN for current port */
2324static inline int func_by_vn(struct bnx2x *bp, int vn)
2325{
2326 return 2 * vn + BP_PORT(bp);
2327}
2328
2329static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
2330{
2331 struct rate_shaping_vars_per_vn m_rs_vn;
2332 struct fairness_vars_per_vn m_fair_vn;
2333 u32 vn_cfg = bp->mf_config[vn];
2334 int func = func_by_vn(bp, vn);
2335 u16 vn_min_rate, vn_max_rate;
2336 int i;
2337
2338 /* If function is hidden - set min and max to zeroes */
2339 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2340 vn_min_rate = 0;
2341 vn_max_rate = 0;
2342
2343 } else {
2344 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2345
2346 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2347 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2348 /* If fairness is enabled (not all min rates are zeroes) and
2349 if current min rate is zero - set it to 1.
2350 This is a requirement of the algorithm. */
2351 if (bp->vn_weight_sum && (vn_min_rate == 0))
2352 vn_min_rate = DEF_MIN_RATE;
2353
2354 if (IS_MF_SI(bp))
2355 /* maxCfg in percents of linkspeed */
2356 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2357 else
2358 /* maxCfg is absolute in 100Mb units */
2359 vn_max_rate = maxCfg * 100;
2360 }
2361
2362 DP(NETIF_MSG_IFUP,
2363 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2364 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2365
2366 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2367 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2368
2369 /* global vn counter - maximal Mbps for this vn */
2370 m_rs_vn.vn_counter.rate = vn_max_rate;
2371
2372 /* quota - number of bytes transmitted in this period */
2373 m_rs_vn.vn_counter.quota =
2374 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2375
2376 if (bp->vn_weight_sum) {
2377 /* credit for each period of the fairness algorithm:
2378 number of bytes in T_FAIR (the vn share the port rate).
2379 vn_weight_sum should not be larger than 10000, thus
2380 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2381 than zero */
2382 m_fair_vn.vn_credit_delta =
2383 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2384 (8 * bp->vn_weight_sum))),
2385 (bp->cmng.fair_vars.fair_threshold +
2386 MIN_ABOVE_THRESH));
2387 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2388 m_fair_vn.vn_credit_delta);
2389 }
2390
2391 /* Store it to internal memory */
2392 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2393 REG_WR(bp, BAR_XSTRORM_INTMEM +
2394 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2395 ((u32 *)(&m_rs_vn))[i]);
2396
2397 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2398 REG_WR(bp, BAR_XSTRORM_INTMEM +
2399 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2400 ((u32 *)(&m_fair_vn))[i]);
2401}
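/*
 * Hedged arithmetic check of the quota above: with maxCfg = 25 in SI
 * mode on a 10G link, vn_max_rate = 10000 * 25 / 100 = 2500 Mbps
 * (i.e. 2500 bits/usec), so with an assumed RS_PERIODIC_TIMEOUT_USEC
 * of 100 the per-period quota is 2500 * 100 / 8 = 31250 bytes.
 */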
2402
2403static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2404{
2405 if (CHIP_REV_IS_SLOW(bp))
2406 return CMNG_FNS_NONE;
2407 if (IS_MF(bp))
2408 return CMNG_FNS_MINMAX;
2409
2410 return CMNG_FNS_NONE;
2411}
2412
2413void bnx2x_read_mf_cfg(struct bnx2x *bp)
2414{
2415 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2416
2417 if (BP_NOMCP(bp))
2418		return; /* what should be the default value in this case? */
2419
2420 /* For 2 port configuration the absolute function number formula
2421 * is:
2422 * abs_func = 2 * vn + BP_PORT + BP_PATH
2423 *
2424 * and there are 4 functions per port
2425 *
2426 * For 4 port configuration it is
2427 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2428 *
2429 * and there are 2 functions per port
2430 */
2431 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2432 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2433
2434 if (func >= E1H_FUNC_MAX)
2435 break;
2436
2437 bp->mf_config[vn] =
2438 MF_CFG_RD(bp, func_mf_config[func].config);
2439 }
2440}
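/*
 * Hedged illustration of the abs_func formula above, for path 0,
 * port 1: on a 2-port chip (n = 1), vn = {0,1,2,3} maps to
 * func = {1,3,5,7}; on a 4-port chip (n = 2), vn = {0,1} maps to
 * func = {2,6} before the E1H_FUNC_MAX cut-off.
 */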
2441
2442static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2443{
2444
2445 if (cmng_type == CMNG_FNS_MINMAX) {
2446 int vn;
2447
2448 /* clear cmng_enables */
2449 bp->cmng.flags.cmng_enables = 0;
2450
2451 /* read mf conf from shmem */
2452 if (read_cfg)
2453 bnx2x_read_mf_cfg(bp);
2454
2455 /* Init rate shaping and fairness contexts */
2456 bnx2x_init_port_minmax(bp);
2457
2458 /* vn_weight_sum and enable fairness if not 0 */
2459 bnx2x_calc_vn_weight_sum(bp);
2460
2461 /* calculate and set min-max rate for each vn */
2462 if (bp->port.pmf)
2463 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2464 bnx2x_init_vn_minmax(bp, vn);
2465
2466 /* always enable rate shaping and fairness */
2467 bp->cmng.flags.cmng_enables |=
2468 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2469 if (!bp->vn_weight_sum)
2470 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2471 " fairness will be disabled\n");
2472 return;
2473 }
2474
2475 /* rate shaping and fairness are disabled */
2476 DP(NETIF_MSG_IFUP,
2477 "rate shaping and fairness are disabled\n");
2478}
2479
2480static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2481{
2482 int func;
2483 int vn;
2484
2485 /* Set the attention towards other drivers on the same port */
2486 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2487 if (vn == BP_VN(bp))
2488 continue;
2489
2490 func = func_by_vn(bp, vn);
2491 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2492 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2493 }
2494}
2495
2496/* This function is called upon link interrupt */
2497static void bnx2x_link_attn(struct bnx2x *bp)
2498{
2499 /* Make sure that we are synced with the current statistics */
2500 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2501
2502 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2503
2504 if (bp->link_vars.link_up) {
2505
2506 /* dropless flow control */
2507 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2508 int port = BP_PORT(bp);
2509 u32 pause_enabled = 0;
2510
2511 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2512 pause_enabled = 1;
2513
2514 REG_WR(bp, BAR_USTRORM_INTMEM +
2515 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2516 pause_enabled);
2517 }
2518
2519 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2520 struct host_port_stats *pstats;
2521
2522 pstats = bnx2x_sp(bp, port_stats);
2523 /* reset old mac stats */
2524 memset(&(pstats->mac_stx[0]), 0,
2525 sizeof(struct mac_stx));
2526 }
2527 if (bp->state == BNX2X_STATE_OPEN)
2528 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2529 }
2530
2531 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2532 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2533
2534 if (cmng_fns != CMNG_FNS_NONE) {
2535 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2536 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2537 } else
2538 /* rate shaping and fairness are disabled */
2539 DP(NETIF_MSG_IFUP,
2540 "single function mode without fairness\n");
2541 }
2542
2543 __bnx2x_link_report(bp);
2544
2545 if (IS_MF(bp))
2546 bnx2x_link_sync_notify(bp);
2547}
2548
2549void bnx2x__link_status_update(struct bnx2x *bp)
2550{
2551 if (bp->state != BNX2X_STATE_OPEN)
2552 return;
2553
2554 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2555
2556 if (bp->link_vars.link_up)
2557 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2558 else
2559 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2560
2561 /* indicate link status */
2562 bnx2x_link_report(bp);
2563}
2564
2565static void bnx2x_pmf_update(struct bnx2x *bp)
2566{
2567 int port = BP_PORT(bp);
2568 u32 val;
2569
2570 bp->port.pmf = 1;
2571 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2572
2573 /*
2574 * We need the mb() to ensure the ordering between the writing to
2575 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2576 */
2577 smp_mb();
2578
2579 /* queue a periodic task */
2580 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2581
2582 bnx2x_dcbx_pmf_update(bp);
2583
2584 /* enable nig attention */
2585 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2586 if (bp->common.int_block == INT_BLOCK_HC) {
2587 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2588 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2589 } else if (!CHIP_IS_E1x(bp)) {
2590 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2591 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2592 }
2593
2594 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2595}
2596
2597/* end of Link */
2598
2599/* slow path */
2600
2601/*
2602 * General service functions
2603 */
2604
2605/* send the MCP a request, block until there is a reply */
2606u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2607{
2608 int mb_idx = BP_FW_MB_IDX(bp);
2609 u32 seq;
2610 u32 rc = 0;
2611 u32 cnt = 1;
2612 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2613
2614 mutex_lock(&bp->fw_mb_mutex);
2615 seq = ++bp->fw_seq;
2616 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2617 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2618
2619 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
2620 (command | seq), param);
2621
2622 do {
2623		/* let the FW do its magic ... */
2624 msleep(delay);
2625
2626 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2627
2628		/* Give the FW up to 5 seconds (500*10ms) */
2629 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2630
2631 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2632 cnt*delay, rc, seq);
2633
2634 /* is this a reply to our command? */
2635 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2636 rc &= FW_MSG_CODE_MASK;
2637 else {
2638 /* FW BUG! */
2639 BNX2X_ERR("FW failed to respond!\n");
2640 bnx2x_fw_dump(bp);
2641 rc = 0;
2642 }
2643 mutex_unlock(&bp->fw_mb_mutex);
2644
2645 return rc;
2646}
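
/* Illustrative usage sketch (editorial addition, not part of the driver):
 * a caller passes a command code and checks the masked reply, e.g.
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 *	if (!rc)
 *		BNX2X_ERR("no response from MCP\n");
 *
 * The poll above waits up to 500 iterations of 'delay' ms for the sequence
 * numbers to match; the function returns 0 when the MCP fails to respond.
 */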
2647
2648static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2649{
2650#ifdef BCM_CNIC
2651 /* Statistics are not supported for CNIC Clients at the moment */
2652 if (IS_FCOE_FP(fp))
2653 return false;
2654#endif
2655 return true;
2656}
2657
2658void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2659{
2660 if (CHIP_IS_E1x(bp)) {
2661 struct tstorm_eth_function_common_config tcfg = {0};
2662
2663 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2664 }
2665
2666 /* Enable the function in the FW */
2667 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2668 storm_memset_func_en(bp, p->func_id, 1);
2669
2670 /* spq */
2671 if (p->func_flgs & FUNC_FLG_SPQ) {
2672 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2673 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2674 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2675 }
2676}
2677
2678/**
2679 * bnx2x_get_common_flags - Return common flags
2680 *
2681 * @bp:		device handle
2682 * @fp:		queue handle
2683 * @zero_stats:	TRUE if statistics zeroing is needed
2684 *
2685 * Return the flags that are common for the Tx-only and not normal connections.
2686 */
2687static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2688 struct bnx2x_fastpath *fp,
2689 bool zero_stats)
2690{
2691 unsigned long flags = 0;
2692
2693 /* PF driver will always initialize the Queue to an ACTIVE state */
2694 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
2695
2696 /* tx only connections collect statistics (on the same index as the
2697 * parent connection). The statistics are zeroed when the parent
2698 * connection is initialized.
2699 */
2700 if (stat_counter_valid(bp, fp)) {
2701 __set_bit(BNX2X_Q_FLG_STATS, &flags);
2702 if (zero_stats)
2703 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
2704 }
2705
2706 return flags;
2707}
2708
2709static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2710 struct bnx2x_fastpath *fp,
2711 bool leading)
2712{
2713 unsigned long flags = 0;
2714
2715 /* calculate other queue flags */
2716 if (IS_MF_SD(bp))
2717 __set_bit(BNX2X_Q_FLG_OV, &flags);
2718
2719 if (IS_FCOE_FP(fp))
2720 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
2721
2722 if (!fp->disable_tpa) {
2723 __set_bit(BNX2X_Q_FLG_TPA, &flags);
2724 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
2725 }
2726
2727 if (leading) {
2728 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
2729 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
2730 }
2731
2732 /* Always set HW VLAN stripping */
2733 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
2734
2736 return flags | bnx2x_get_common_flags(bp, fp, true);
2737}
2738
2739static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
2740 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
2741 u8 cos)
2742{
2743 gen_init->stat_id = bnx2x_stats_id(fp);
2744 gen_init->spcl_id = fp->cl_id;
2745
2746 /* Always use mini-jumbo MTU for FCoE L2 ring */
2747 if (IS_FCOE_FP(fp))
2748 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2749 else
2750 gen_init->mtu = bp->dev->mtu;
2751
2752 gen_init->cos = cos;
2753}
2754
2755static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2756 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2757 struct bnx2x_rxq_setup_params *rxq_init)
2758{
2759 u8 max_sge = 0;
2760 u16 sge_sz = 0;
2761 u16 tpa_agg_size = 0;
2762
2763 if (!fp->disable_tpa) {
2764 pause->sge_th_lo = SGE_TH_LO(bp);
2765 pause->sge_th_hi = SGE_TH_HI(bp);
2766
2767 /* validate SGE ring has enough to cross high threshold */
2768 WARN_ON(bp->dropless_fc &&
2769 pause->sge_th_hi + FW_PREFETCH_CNT >
2770 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
2771
2772 tpa_agg_size = min_t(u32,
2773 (min_t(u32, 8, MAX_SKB_FRAGS) *
2774 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
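		/* Editorial note: the computation below first converts the
		 * SGE-page-aligned MTU into pages, rounds that up to a whole
		 * multiple of PAGES_PER_SGE (valid because PAGES_PER_SGE is a
		 * power of two) and then shifts to get the number of SGEs an
		 * aggregated packet may consume.
		 */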
2775 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2776 SGE_PAGE_SHIFT;
2777 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2778 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2779 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2780 0xffff);
2781 }
2782
2783 /* pause - not for e1 */
2784 if (!CHIP_IS_E1(bp)) {
2785 pause->bd_th_lo = BD_TH_LO(bp);
2786 pause->bd_th_hi = BD_TH_HI(bp);
2787
2788 pause->rcq_th_lo = RCQ_TH_LO(bp);
2789 pause->rcq_th_hi = RCQ_TH_HI(bp);
2790 /*
2791 * validate that rings have enough entries to cross
2792 * high thresholds
2793 */
2794 WARN_ON(bp->dropless_fc &&
2795 pause->bd_th_hi + FW_PREFETCH_CNT >
2796 bp->rx_ring_size);
2797 WARN_ON(bp->dropless_fc &&
2798 pause->rcq_th_hi + FW_PREFETCH_CNT >
2799 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
2800
2801 pause->pri_map = 1;
2802 }
2803
2804 /* rxq setup */
2805 rxq_init->dscr_map = fp->rx_desc_mapping;
2806 rxq_init->sge_map = fp->rx_sge_mapping;
2807 rxq_init->rcq_map = fp->rx_comp_mapping;
2808 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2809
2810	/* This should be the maximum number of data bytes that may be
2811	 * placed on the BD (not including padding).
2812 */
2813 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
2814 IP_HEADER_ALIGNMENT_PADDING;
2815
2816 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2817 rxq_init->tpa_agg_sz = tpa_agg_size;
2818 rxq_init->sge_buf_sz = sge_sz;
2819 rxq_init->max_sges_pkt = max_sge;
2820 rxq_init->rss_engine_id = BP_FUNC(bp);
2821
2822	/* Maximum number of simultaneous TPA aggregations for this Queue.
2823	 *
2824	 * For PF Clients it should be the maximum available number.
2825 * VF driver(s) may want to define it to a smaller value.
2826 */
2827 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
2828
2829 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2830 rxq_init->fw_sb_id = fp->fw_sb_id;
2831
2832 if (IS_FCOE_FP(fp))
2833 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2834 else
2835 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
2836}
2837
2838static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
2839 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
2840 u8 cos)
2841{
2842 txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
2843 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
2844 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2845 txq_init->fw_sb_id = fp->fw_sb_id;
2846
2847 /*
2848	 * set the tss leading client id for TX classification ==
2849 * leading RSS client id
2850 */
2851 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
2852
2853 if (IS_FCOE_FP(fp)) {
2854 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2855 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2856 }
2857}
2858
2859static void bnx2x_pf_init(struct bnx2x *bp)
2860{
2861 struct bnx2x_func_init_params func_init = {0};
2862 struct event_ring_data eq_data = { {0} };
2863 u16 flags;
2864
2865 if (!CHIP_IS_E1x(bp)) {
2866 /* reset IGU PF statistics: MSIX + ATTN */
2867 /* PF */
2868 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2869 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2870 (CHIP_MODE_IS_4_PORT(bp) ?
2871 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2872 /* ATTN */
2873 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2874 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2875 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2876 (CHIP_MODE_IS_4_PORT(bp) ?
2877 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2878 }
2879
2880 /* function setup flags */
2881 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2882
2883 /* This flag is relevant for E1x only.
2884	 * E2 doesn't have a TPA configuration at the function level.
2885 */
2886 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2887
2888 func_init.func_flgs = flags;
2889 func_init.pf_id = BP_FUNC(bp);
2890 func_init.func_id = BP_FUNC(bp);
2891 func_init.spq_map = bp->spq_mapping;
2892 func_init.spq_prod = bp->spq_prod_idx;
2893
2894 bnx2x_func_init(bp, &func_init);
2895
2896 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2897
2898 /*
2899	 * Congestion management values depend on the link rate.
2900	 * There is no active link so the initial link rate is set to 10 Gbps.
2901	 * When the link comes up the congestion management values are
2902	 * re-calculated according to the actual link rate.
2903 */
2904 bp->link_vars.line_speed = SPEED_10000;
2905 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2906
2907 /* Only the PMF sets the HW */
2908 if (bp->port.pmf)
2909 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2910
2911 /* init Event Queue */
2912 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2913 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2914 eq_data.producer = bp->eq_prod;
2915 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2916 eq_data.sb_id = DEF_SB_ID;
2917 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2918}
2919
2921static void bnx2x_e1h_disable(struct bnx2x *bp)
2922{
2923 int port = BP_PORT(bp);
2924
2925 bnx2x_tx_disable(bp);
2926
2927 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2928}
2929
2930static void bnx2x_e1h_enable(struct bnx2x *bp)
2931{
2932 int port = BP_PORT(bp);
2933
2934 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2935
2936	/* Tx queues should only be re-enabled */
2937 netif_tx_wake_all_queues(bp->dev);
2938
2939 /*
2940 * Should not call netif_carrier_on since it will be called if the link
2941 * is up when checking for link state
2942 */
2943}
2944
2945/* called due to MCP event (on pmf):
2946 * reread new bandwidth configuration
2947 * configure FW
2948 * notify other functions about the change
2949 */
2950static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2951{
2952 if (bp->link_vars.link_up) {
2953 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2954 bnx2x_link_sync_notify(bp);
2955 }
2956 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2957}
2958
2959static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2960{
2961 bnx2x_config_mf_bw(bp);
2962 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2963}
2964
2965static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2966{
2967 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2968
2969 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2970
2971 /*
2972 * This is the only place besides the function initialization
2973 * where the bp->flags can change so it is done without any
2974 * locks
2975 */
2976 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2977 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2978 bp->flags |= MF_FUNC_DIS;
2979
2980 bnx2x_e1h_disable(bp);
2981 } else {
2982 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2983 bp->flags &= ~MF_FUNC_DIS;
2984
2985 bnx2x_e1h_enable(bp);
2986 }
2987 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2988 }
2989 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2990 bnx2x_config_mf_bw(bp);
2991 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2992 }
2993
2994 /* Report results to MCP */
2995 if (dcc_event)
2996 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2997 else
2998 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2999}
3000
3001/* must be called under the spq lock */
3002static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3003{
3004 struct eth_spe *next_spe = bp->spq_prod_bd;
3005
3006 if (bp->spq_prod_bd == bp->spq_last_bd) {
3007 bp->spq_prod_bd = bp->spq;
3008 bp->spq_prod_idx = 0;
3009 DP(NETIF_MSG_TIMER, "end of spq\n");
3010 } else {
3011 bp->spq_prod_bd++;
3012 bp->spq_prod_idx++;
3013 }
3014 return next_spe;
3015}
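
/* Editorial note: the producer wraps in place - once spq_prod_bd reaches
 * spq_last_bd both the BD pointer and spq_prod_idx are rewound to the start
 * of the ring, so the element returned is always the one the current
 * bnx2x_sp_post() call is about to fill.
 */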
3016
3017/* must be called under the spq lock */
3018static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
3019{
3020 int func = BP_FUNC(bp);
3021
3022 /*
3023 * Make sure that BD data is updated before writing the producer:
3024 * BD data is written to the memory, the producer is read from the
3025 * memory, thus we need a full memory barrier to ensure the ordering.
3026 */
3027 mb();
3028
3029 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3030 bp->spq_prod_idx);
3031 mmiowb();
3032}
3033
3034/**
3035 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3036 *
3037 * @cmd: command to check
3038 * @cmd_type: command type
3039 */
3040static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3041{
3042 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3043 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3044 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3045 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3046 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3047 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3048 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3049 return true;
3050 else
3051 return false;
3053}
3054
3056/**
3057 * bnx2x_sp_post - place a single command on an SP ring
3058 *
3059 * @bp: driver handle
3060 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
3061 * @cid: SW CID the command is related to
3062 * @data_hi: command private data address (high 32 bits)
3063 * @data_lo: command private data address (low 32 bits)
3064 * @cmd_type: command type (e.g. NONE, ETH)
3065 *
3066 * SP data is handled as if it's always an address pair, thus data fields are
3067 * not swapped to little endian in upper functions. Instead this function swaps
3068 * data as if it's two u32 fields.
3069 */
3070int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3071 u32 data_hi, u32 data_lo, int cmd_type)
3072{
3073 struct eth_spe *spe;
3074 u16 type;
3075 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3076
3077#ifdef BNX2X_STOP_ON_ERROR
3078 if (unlikely(bp->panic))
3079 return -EIO;
3080#endif
3081
3082 spin_lock_bh(&bp->spq_lock);
3083
3084 if (common) {
3085 if (!atomic_read(&bp->eq_spq_left)) {
3086 BNX2X_ERR("BUG! EQ ring full!\n");
3087 spin_unlock_bh(&bp->spq_lock);
3088 bnx2x_panic();
3089 return -EBUSY;
3090 }
3091 } else if (!atomic_read(&bp->cq_spq_left)) {
3092 BNX2X_ERR("BUG! SPQ ring full!\n");
3093 spin_unlock_bh(&bp->spq_lock);
3094 bnx2x_panic();
3095 return -EBUSY;
3096 }
3097
3098 spe = bnx2x_sp_get_next(bp);
3099
3100	/* CID needs the port number to be encoded in it */
3101 spe->hdr.conn_and_cmd_data =
3102 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3103 HW_CID(bp, cid));
3104
3105 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
3106
3107 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3108 SPE_HDR_FUNCTION_ID);
3109
3110 spe->hdr.type = cpu_to_le16(type);
3111
3112 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3113 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3114
3115 /*
3116 * It's ok if the actual decrement is issued towards the memory
3117 * somewhere between the spin_lock and spin_unlock. Thus no
3118	 * further explicit memory barrier is needed.
3119 */
3120 if (common)
3121 atomic_dec(&bp->eq_spq_left);
3122 else
3123 atomic_dec(&bp->cq_spq_left);
3124
3126 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
3127 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) "
3128 "type(0x%x) left (CQ, EQ) (%x,%x)\n",
3129 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3130 (u32)(U64_LO(bp->spq_mapping) +
3131 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3132 HW_CID(bp, cid), data_hi, data_lo, type,
3133 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3134
3135 bnx2x_sp_prod_update(bp);
3136 spin_unlock_bh(&bp->spq_lock);
3137 return 0;
3138}
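
/* Illustrative call sketch (editorial addition): a contextless ramrod,
 * which the accounting above charges against eq_spq_left, would be posted
 * roughly as
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
 *			   U64_HI(mapping), U64_LO(mapping),
 *			   NONE_CONNECTION_TYPE);
 *
 * where 'mapping' is the DMA address of the command's private data. Regular
 * ETH connection commands are charged against cq_spq_left instead.
 */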
3139
3140/* acquire split MCP access lock register */
3141static int bnx2x_acquire_alr(struct bnx2x *bp)
3142{
3143 u32 j, val;
3144 int rc = 0;
3145
3146 might_sleep();
3147 for (j = 0; j < 1000; j++) {
3148 val = (1UL << 31);
3149 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
3150 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
3151 if (val & (1L << 31))
3152 break;
3153
3154 msleep(5);
3155 }
3156 if (!(val & (1L << 31))) {
3157 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3158 rc = -EBUSY;
3159 }
3160
3161 return rc;
3162}
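
/* Editorial note: the acquire loop above writes bit 31 to the MCP scratch
 * register at GRCBASE_MCP + 0x9c and re-reads it; the lock is held once the
 * read-back value still has bit 31 set. With 1000 iterations of msleep(5)
 * the acquisition may block for roughly 5 seconds before giving up with
 * -EBUSY.
 */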
3163
3164/* release split MCP access lock register */
3165static void bnx2x_release_alr(struct bnx2x *bp)
3166{
3167 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
3168}
3169
3170#define BNX2X_DEF_SB_ATT_IDX 0x0001
3171#define BNX2X_DEF_SB_IDX 0x0002
3172
3173static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3174{
3175 struct host_sp_status_block *def_sb = bp->def_status_blk;
3176 u16 rc = 0;
3177
3178 barrier(); /* status block is written to by the chip */
3179 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3180 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3181 rc |= BNX2X_DEF_SB_ATT_IDX;
3182 }
3183
3184 if (bp->def_idx != def_sb->sp_sb.running_index) {
3185 bp->def_idx = def_sb->sp_sb.running_index;
3186 rc |= BNX2X_DEF_SB_IDX;
3187 }
3188
3189	/* Do not reorder: the index reads should complete before handling */
3190 barrier();
3191 return rc;
3192}
3193
3194/*
3195 * slow path service functions
3196 */
3197
3198static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3199{
3200 int port = BP_PORT(bp);
3201 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3202 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3203 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3204 NIG_REG_MASK_INTERRUPT_PORT0;
3205 u32 aeu_mask;
3206 u32 nig_mask = 0;
3207 u32 reg_addr;
3208
3209 if (bp->attn_state & asserted)
3210 BNX2X_ERR("IGU ERROR\n");
3211
3212 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3213 aeu_mask = REG_RD(bp, aeu_addr);
3214
3215 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3216 aeu_mask, asserted);
3217 aeu_mask &= ~(asserted & 0x3ff);
3218 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3219
3220 REG_WR(bp, aeu_addr, aeu_mask);
3221 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3222
3223 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3224 bp->attn_state |= asserted;
3225 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3226
3227 if (asserted & ATTN_HARD_WIRED_MASK) {
3228 if (asserted & ATTN_NIG_FOR_FUNC) {
3229
3230 bnx2x_acquire_phy_lock(bp);
3231
3232 /* save nig interrupt mask */
3233 nig_mask = REG_RD(bp, nig_int_mask_addr);
3234
3235 /* If nig_mask is not set, no need to call the update
3236 * function.
3237 */
3238 if (nig_mask) {
3239 REG_WR(bp, nig_int_mask_addr, 0);
3240
3241 bnx2x_link_attn(bp);
3242 }
3243
3244 /* handle unicore attn? */
3245 }
3246 if (asserted & ATTN_SW_TIMER_4_FUNC)
3247 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3248
3249 if (asserted & GPIO_2_FUNC)
3250 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3251
3252 if (asserted & GPIO_3_FUNC)
3253 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3254
3255 if (asserted & GPIO_4_FUNC)
3256 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3257
3258 if (port == 0) {
3259 if (asserted & ATTN_GENERAL_ATTN_1) {
3260 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3261 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3262 }
3263 if (asserted & ATTN_GENERAL_ATTN_2) {
3264 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3265 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3266 }
3267 if (asserted & ATTN_GENERAL_ATTN_3) {
3268 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3269 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3270 }
3271 } else {
3272 if (asserted & ATTN_GENERAL_ATTN_4) {
3273 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3274 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3275 }
3276 if (asserted & ATTN_GENERAL_ATTN_5) {
3277 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3278 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3279 }
3280 if (asserted & ATTN_GENERAL_ATTN_6) {
3281 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3282 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3283 }
3284 }
3285
3286 } /* if hardwired */
3287
3288 if (bp->common.int_block == INT_BLOCK_HC)
3289 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3290 COMMAND_REG_ATTN_BITS_SET);
3291 else
3292 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
3293
3294 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
3295 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3296 REG_WR(bp, reg_addr, asserted);
3297
3298 /* now set back the mask */
3299 if (asserted & ATTN_NIG_FOR_FUNC) {
3300 REG_WR(bp, nig_int_mask_addr, nig_mask);
3301 bnx2x_release_phy_lock(bp);
3302 }
3303}
3304
3305static inline void bnx2x_fan_failure(struct bnx2x *bp)
3306{
3307 int port = BP_PORT(bp);
3308 u32 ext_phy_config;
3309 /* mark the failure */
3310 ext_phy_config =
3311 SHMEM_RD(bp,
3312 dev_info.port_hw_config[port].external_phy_config);
3313
3314 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3315 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
3316 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
3317 ext_phy_config);
3318
3319 /* log the failure */
3320 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
3321		   " the driver to shut down the card to prevent permanent"
3322 " damage. Please contact OEM Support for assistance\n");
3323}
3324
3325static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3326{
3327 int port = BP_PORT(bp);
3328 int reg_offset;
3329 u32 val;
3330
3331 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3332 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
3333
3334 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
3335
3336 val = REG_RD(bp, reg_offset);
3337 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
3338 REG_WR(bp, reg_offset, val);
3339
3340 BNX2X_ERR("SPIO5 hw attention\n");
3341
3342 /* Fan failure attention */
3343 bnx2x_hw_reset_phy(&bp->link_params);
3344 bnx2x_fan_failure(bp);
3345 }
3346
3347 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
3348 bnx2x_acquire_phy_lock(bp);
3349 bnx2x_handle_module_detect_int(&bp->link_params);
3350 bnx2x_release_phy_lock(bp);
3351 }
3352
3353 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3354
3355 val = REG_RD(bp, reg_offset);
3356 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3357 REG_WR(bp, reg_offset, val);
3358
3359 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3360 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3361 bnx2x_panic();
3362 }
3363}
3364
3365static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3366{
3367 u32 val;
3368
3369 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3370
3371 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3372 BNX2X_ERR("DB hw attention 0x%x\n", val);
3373 /* DORQ discard attention */
3374 if (val & 0x2)
3375 BNX2X_ERR("FATAL error from DORQ\n");
3376 }
3377
3378 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3379
3380 int port = BP_PORT(bp);
3381 int reg_offset;
3382
3383 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3384 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3385
3386 val = REG_RD(bp, reg_offset);
3387 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3388 REG_WR(bp, reg_offset, val);
3389
3390 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3391 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3392 bnx2x_panic();
3393 }
3394}
3395
3396static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3397{
3398 u32 val;
3399
3400 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3401
3402 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3403 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3404 /* CFC error attention */
3405 if (val & 0x2)
3406 BNX2X_ERR("FATAL error from CFC\n");
3407 }
3408
3409 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3410 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3411 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
3412 /* RQ_USDMDP_FIFO_OVERFLOW */
3413 if (val & 0x18000)
3414 BNX2X_ERR("FATAL error from PXP\n");
3415
3416 if (!CHIP_IS_E1x(bp)) {
3417 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3418 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3419 }
3420 }
3421
3422 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3423
3424 int port = BP_PORT(bp);
3425 int reg_offset;
3426
3427 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3428 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3429
3430 val = REG_RD(bp, reg_offset);
3431 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3432 REG_WR(bp, reg_offset, val);
3433
3434 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3435 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3436 bnx2x_panic();
3437 }
3438}
3439
3440static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3441{
3442 u32 val;
3443
3444 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3445
3446 if (attn & BNX2X_PMF_LINK_ASSERT) {
3447 int func = BP_FUNC(bp);
3448
3449 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3450 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3451 func_mf_config[BP_ABS_FUNC(bp)].config);
3452 val = SHMEM_RD(bp,
3453 func_mb[BP_FW_MB_IDX(bp)].drv_status);
3454 if (val & DRV_STATUS_DCC_EVENT_MASK)
3455 bnx2x_dcc_event(bp,
3456 (val & DRV_STATUS_DCC_EVENT_MASK));
3457
3458 if (val & DRV_STATUS_SET_MF_BW)
3459 bnx2x_set_mf_bw(bp);
3460
3461 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3462 bnx2x_pmf_update(bp);
3463
3464 if (bp->port.pmf &&
3465 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3466 bp->dcbx_enabled > 0)
3467 /* start dcbx state machine */
3468 bnx2x_dcbx_set_params(bp,
3469 BNX2X_DCBX_STATE_NEG_RECEIVED);
3470 if (bp->link_vars.periodic_flags &
3471 PERIODIC_FLAGS_LINK_EVENT) {
3472 /* sync with link */
3473 bnx2x_acquire_phy_lock(bp);
3474 bp->link_vars.periodic_flags &=
3475 ~PERIODIC_FLAGS_LINK_EVENT;
3476 bnx2x_release_phy_lock(bp);
3477 if (IS_MF(bp))
3478 bnx2x_link_sync_notify(bp);
3479 bnx2x_link_report(bp);
3480 }
3481 /* Always call it here: bnx2x_link_report() will
3482			 * prevent duplicate link indications.
3483 */
3484 bnx2x__link_status_update(bp);
3485 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3486
3487 BNX2X_ERR("MC assert!\n");
3488 bnx2x_mc_assert(bp);
3489 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3490 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3491 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3492 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3493 bnx2x_panic();
3494
3495 } else if (attn & BNX2X_MCP_ASSERT) {
3496
3497 BNX2X_ERR("MCP assert!\n");
3498 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3499 bnx2x_fw_dump(bp);
3500
3501 } else
3502 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3503 }
3504
3505 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3506 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3507 if (attn & BNX2X_GRC_TIMEOUT) {
3508 val = CHIP_IS_E1(bp) ? 0 :
3509 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3510 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3511 }
3512 if (attn & BNX2X_GRC_RSV) {
3513 val = CHIP_IS_E1(bp) ? 0 :
3514 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3515 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3516 }
3517 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3518 }
3519}
3520
3521/*
3522 * Bits map:
3523 * 0-7 - Engine0 load counter.
3524 * 8-15 - Engine1 load counter.
3525 * 16 - Engine0 RESET_IN_PROGRESS bit.
3526 * 17 - Engine1 RESET_IN_PROGRESS bit.
3527 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function
3528 * on the engine
3529 * 19 - Engine1 ONE_IS_LOADED.
3530 * 20 - Chip reset flow bit. When set, a non-leader must wait for the
3531 *      leaders of both engines to complete (check both RESET_IN_PROGRESS
3532 *      bits, not just the one belonging to its engine).
3533 *
3534 */
3535#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
3536
3537#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
3538#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
3539#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
3540#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
3541#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
3542#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
3543#define BNX2X_GLOBAL_RESET_BIT 0x00040000
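
/* Decoding sketch (editorial addition, illustrative only): given a value
 * 'val' read from BNX2X_RECOVERY_GLOB_REG, the engine 0 fields are
 *
 *	u32 cnt0 = (val & BNX2X_PATH0_LOAD_CNT_MASK) >>
 *		   BNX2X_PATH0_LOAD_CNT_SHIFT;
 *	bool rst0 = !!(val & BNX2X_PATH0_RST_IN_PROG_BIT);
 *
 * matching the bit map documented above.
 */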
3544
3545/*
3546 * Set the GLOBAL_RESET bit.
3547 *
3548 * Should be run under rtnl lock
3549 */
3550void bnx2x_set_reset_global(struct bnx2x *bp)
3551{
3552 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3553
3554 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
3555 barrier();
3556 mmiowb();
3557}
3558
3559/*
3560 * Clear the GLOBAL_RESET bit.
3561 *
3562 * Should be run under rtnl lock
3563 */
3564static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
3565{
3566 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3567
3568 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
3569 barrier();
3570 mmiowb();
3571}
3572
3573/*
3574 * Checks the GLOBAL_RESET bit.
3575 *
3576 * should be run under rtnl lock
3577 */
3578static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
3579{
3580 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3581
3582 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3583 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
3584}
3585
3586/*
3587 * Clear RESET_IN_PROGRESS bit for the current engine.
3588 *
3589 * Should be run under rtnl lock
3590 */
3591static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3592{
3593 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3594 u32 bit = BP_PATH(bp) ?
3595 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3596
3597 /* Clear the bit */
3598 val &= ~bit;
3599 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3600 barrier();
3601 mmiowb();
3602}
3603
3604/*
3605 * Set RESET_IN_PROGRESS for the current engine.
3606 *
3607 * should be run under rtnl lock
3608 */
3609void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3610{
3611 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3612 u32 bit = BP_PATH(bp) ?
3613 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3614
3615 /* Set the bit */
3616 val |= bit;
3617 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3618 barrier();
3619 mmiowb();
3620}
3621
3622/*
3623 * Checks the RESET_IN_PROGRESS bit for the given engine.
3624 * should be run under rtnl lock
3625 */
3626bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
3627{
3628 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3629 u32 bit = engine ?
3630 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3631
3632 /* return false if bit is set */
3633 return (val & bit) ? false : true;
3634}
3635
3636/*
3637 * Increment the load counter for the current engine.
3638 *
3639 * should be run under rtnl lock
3640 */
3641void bnx2x_inc_load_cnt(struct bnx2x *bp)
3642{
3643 u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3644 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
3645 BNX2X_PATH0_LOAD_CNT_MASK;
3646 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
3647 BNX2X_PATH0_LOAD_CNT_SHIFT;
3648
3649 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3650
3651 /* get the current counter value */
3652 val1 = (val & mask) >> shift;
3653
3654 /* increment... */
3655 val1++;
3656
3657 /* clear the old value */
3658 val &= ~mask;
3659
3660 /* set the new one */
3661 val |= ((val1 << shift) & mask);
3662
3663 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3664 barrier();
3665 mmiowb();
3666}
3667
3668/**
3669 * bnx2x_dec_load_cnt - decrement the load counter
3670 *
3671 * @bp: driver handle
3672 *
3673 * Should be run under rtnl lock.
3674 * Decrements the load counter for the current engine. Returns
3675 * the new counter value.
3676 */
3677u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3678{
3679 u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3680 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
3681 BNX2X_PATH0_LOAD_CNT_MASK;
3682 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
3683 BNX2X_PATH0_LOAD_CNT_SHIFT;
3684
3685 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3686
3687 /* get the current counter value */
3688 val1 = (val & mask) >> shift;
3689
3690 /* decrement... */
3691 val1--;
3692
3693 /* clear the old value */
3694 val &= ~mask;
3695
3696 /* set the new one */
3697 val |= ((val1 << shift) & mask);
3698
3699 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3700 barrier();
3701 mmiowb();
3702
3703 return val1;
3704}
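
/* Worked example (editorial addition): if the register reads 0x00000003 for
 * path 0 (load counter == 3), the read-modify-write above clears the 0xff
 * field and writes back (2 << 0) & 0xff, leaving 0x00000002 in the register
 * and returning 2.
 */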
3705
3706/*
3707 * Read the load counter for the current engine.
3708 *
3709 * should be run under rtnl lock
3710 */
3711static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
3712{
3713 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
3714 BNX2X_PATH0_LOAD_CNT_MASK);
3715 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
3716 BNX2X_PATH0_LOAD_CNT_SHIFT);
3717 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3718
3719 DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val);
3720
3721 val = (val & mask) >> shift;
3722
3723 DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val);
3724
3725 return val;
3726}
3727
3728/*
3729 * Reset the load counter for the current engine.
3730 *
3731 * should be run under rtnl lock
3732 */
3733static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3734{
3735 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3736 u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
3737 BNX2X_PATH0_LOAD_CNT_MASK);
3738
3739 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
3740}
3741
3742static inline void _print_next_block(int idx, const char *blk)
3743{
3744 if (idx)
3745 pr_cont(", ");
3746 pr_cont("%s", blk);
3747}
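
/* Example output (editorial note): three consecutive parity hits print as
 * "BRB, PARSER, TSDM" - only calls with a non-zero running index get the
 * ", " separator, so the list reads naturally after the netdev_err() prefix
 * emitted by bnx2x_parity_attn() below.
 */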
3748
3749static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
3750 bool print)
3751{
3752 int i = 0;
3753 u32 cur_bit = 0;
3754 for (i = 0; sig; i++) {
3755 cur_bit = ((u32)0x1 << i);
3756 if (sig & cur_bit) {
3757 switch (cur_bit) {
3758 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3759 if (print)
3760 _print_next_block(par_num++, "BRB");
3761 break;
3762 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3763 if (print)
3764 _print_next_block(par_num++, "PARSER");
3765 break;
3766 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3767 if (print)
3768 _print_next_block(par_num++, "TSDM");
3769 break;
3770 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3771 if (print)
3772 _print_next_block(par_num++,
3773 "SEARCHER");
3774 break;
3775 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
3776 if (print)
3777 _print_next_block(par_num++, "TCM");
3778 break;
3779 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3780 if (print)
3781 _print_next_block(par_num++, "TSEMI");
3782 break;
3783 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3784 if (print)
3785 _print_next_block(par_num++, "XPB");
3786 break;
3787 }
3788
3789 /* Clear the bit */
3790 sig &= ~cur_bit;
3791 }
3792 }
3793
3794 return par_num;
3795}
3796
3797static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
3798 bool *global, bool print)
3799{
3800 int i = 0;
3801 u32 cur_bit = 0;
3802 for (i = 0; sig; i++) {
3803 cur_bit = ((u32)0x1 << i);
3804 if (sig & cur_bit) {
3805 switch (cur_bit) {
3806 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
3807 if (print)
3808 _print_next_block(par_num++, "PBF");
3809 break;
3810 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3811 if (print)
3812 _print_next_block(par_num++, "QM");
3813 break;
3814 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
3815 if (print)
3816 _print_next_block(par_num++, "TM");
3817 break;
3818 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3819 if (print)
3820 _print_next_block(par_num++, "XSDM");
3821 break;
3822 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
3823 if (print)
3824 _print_next_block(par_num++, "XCM");
3825 break;
3826 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3827 if (print)
3828 _print_next_block(par_num++, "XSEMI");
3829 break;
3830 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3831 if (print)
3832 _print_next_block(par_num++,
3833 "DOORBELLQ");
3834 break;
3835 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
3836 if (print)
3837 _print_next_block(par_num++, "NIG");
3838 break;
3839 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3840 if (print)
3841 _print_next_block(par_num++,
3842 "VAUX PCI CORE");
3843 *global = true;
3844 break;
3845 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3846 if (print)
3847 _print_next_block(par_num++, "DEBUG");
3848 break;
3849 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3850 if (print)
3851 _print_next_block(par_num++, "USDM");
3852 break;
3853 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
3854 if (print)
3855 _print_next_block(par_num++, "UCM");
3856 break;
3857 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3858 if (print)
3859 _print_next_block(par_num++, "USEMI");
3860 break;
3861 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3862 if (print)
3863 _print_next_block(par_num++, "UPB");
3864 break;
3865 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3866 if (print)
3867 _print_next_block(par_num++, "CSDM");
3868 break;
3869 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
3870 if (print)
3871 _print_next_block(par_num++, "CCM");
3872 break;
3873 }
3874
3875 /* Clear the bit */
3876 sig &= ~cur_bit;
3877 }
3878 }
3879
3880 return par_num;
3881}
3882
3883static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
3884 bool print)
3885{
3886 int i = 0;
3887 u32 cur_bit = 0;
3888 for (i = 0; sig; i++) {
3889 cur_bit = ((u32)0x1 << i);
3890 if (sig & cur_bit) {
3891 switch (cur_bit) {
3892 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3893 if (print)
3894 _print_next_block(par_num++, "CSEMI");
3895 break;
3896 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3897 if (print)
3898 _print_next_block(par_num++, "PXP");
3899 break;
3900 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3901 if (print)
3902 _print_next_block(par_num++,
3903 "PXPPCICLOCKCLIENT");
3904 break;
3905 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3906 if (print)
3907 _print_next_block(par_num++, "CFC");
3908 break;
3909 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3910 if (print)
3911 _print_next_block(par_num++, "CDU");
3912 break;
3913 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
3914 if (print)
3915 _print_next_block(par_num++, "DMAE");
3916 break;
3917 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3918 if (print)
3919 _print_next_block(par_num++, "IGU");
3920 break;
3921 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3922 if (print)
3923 _print_next_block(par_num++, "MISC");
3924 break;
3925 }
3926
3927 /* Clear the bit */
3928 sig &= ~cur_bit;
3929 }
3930 }
3931
3932 return par_num;
3933}
3934
3935static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
3936 bool *global, bool print)
3937{
3938 int i = 0;
3939 u32 cur_bit = 0;
3940 for (i = 0; sig; i++) {
3941 cur_bit = ((u32)0x1 << i);
3942 if (sig & cur_bit) {
3943 switch (cur_bit) {
3944 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3945 if (print)
3946 _print_next_block(par_num++, "MCP ROM");
3947 *global = true;
3948 break;
3949 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3950 if (print)
3951 _print_next_block(par_num++,
3952 "MCP UMP RX");
3953 *global = true;
3954 break;
3955 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3956 if (print)
3957 _print_next_block(par_num++,
3958 "MCP UMP TX");
3959 *global = true;
3960 break;
3961 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3962 if (print)
3963 _print_next_block(par_num++,
3964 "MCP SCPAD");
3965 *global = true;
3966 break;
3967 }
3968
3969 /* Clear the bit */
3970 sig &= ~cur_bit;
3971 }
3972 }
3973
3974 return par_num;
3975}
3976
3977static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
3978 bool print)
3979{
3980 int i = 0;
3981 u32 cur_bit = 0;
3982 for (i = 0; sig; i++) {
3983 cur_bit = ((u32)0x1 << i);
3984 if (sig & cur_bit) {
3985 switch (cur_bit) {
3986 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
3987 if (print)
3988 _print_next_block(par_num++, "PGLUE_B");
3989 break;
3990 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
3991 if (print)
3992 _print_next_block(par_num++, "ATC");
3993 break;
3994 }
3995
3996 /* Clear the bit */
3997 sig &= ~cur_bit;
3998 }
3999 }
4000
4001 return par_num;
4002}
4003
4004static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4005 u32 *sig)
4006{
4007 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4008 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4009 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4010 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4011 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4012 int par_num = 0;
4013 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
4014 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x "
4015 "[4]:0x%08x\n",
4016 sig[0] & HW_PRTY_ASSERT_SET_0,
4017 sig[1] & HW_PRTY_ASSERT_SET_1,
4018 sig[2] & HW_PRTY_ASSERT_SET_2,
4019 sig[3] & HW_PRTY_ASSERT_SET_3,
4020 sig[4] & HW_PRTY_ASSERT_SET_4);
4021 if (print)
4022 netdev_err(bp->dev,
4023 "Parity errors detected in blocks: ");
4024 par_num = bnx2x_check_blocks_with_parity0(
4025 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
4026 par_num = bnx2x_check_blocks_with_parity1(
4027 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
4028 par_num = bnx2x_check_blocks_with_parity2(
4029 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
4030 par_num = bnx2x_check_blocks_with_parity3(
4031 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
4032 par_num = bnx2x_check_blocks_with_parity4(
4033 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
4034
4035 if (print)
4036 pr_cont("\n");
4037
4038 return true;
4039 } else
4040 return false;
4041}
4042
4043/**
4044 * bnx2x_chk_parity_attn - checks for parity attentions.
4045 *
4046 * @bp: driver handle
4047 * @global: true if there was a global attention
4048 * @print: show parity attention in syslog
4049 */
4050bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4051{
4052 struct attn_route attn = { {0} };
4053 int port = BP_PORT(bp);
4054
4055 attn.sig[0] = REG_RD(bp,
4056 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
4057 port*4);
4058 attn.sig[1] = REG_RD(bp,
4059 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
4060 port*4);
4061 attn.sig[2] = REG_RD(bp,
4062 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
4063 port*4);
4064 attn.sig[3] = REG_RD(bp,
4065 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4066 port*4);
4067
4068 if (!CHIP_IS_E1x(bp))
4069 attn.sig[4] = REG_RD(bp,
4070 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
4071 port*4);
4072
4073 return bnx2x_parity_attn(bp, global, print, attn.sig);
4074}
4075
4077static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
4078{
4079 u32 val;
4080 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
4081
4082 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
4083 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
4084 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
4085 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
4086 "ADDRESS_ERROR\n");
4087 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
4088 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
4089 "INCORRECT_RCV_BEHAVIOR\n");
4090 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
4091 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
4092 "WAS_ERROR_ATTN\n");
4093 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
4094 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
4095 "VF_LENGTH_VIOLATION_ATTN\n");
4096 if (val &
4097 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
4098 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
4099 "VF_GRC_SPACE_VIOLATION_ATTN\n");
4100 if (val &
4101 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
4102 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
4103 "VF_MSIX_BAR_VIOLATION_ATTN\n");
4104 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
4105 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
4106 "TCPL_ERROR_ATTN\n");
4107 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
4108 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
4109 "TCPL_IN_TWO_RCBS_ATTN\n");
4110 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
4111 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
4112 "CSSNOOP_FIFO_OVERFLOW\n");
4113 }
4114 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
4115 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
4116 BNX2X_ERR("ATC hw attention 0x%x\n", val);
4117 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
4118 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
4119 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
4120 BNX2X_ERR("ATC_ATC_INT_STS_REG"
4121 "_ATC_TCPL_TO_NOT_PEND\n");
4122 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
4123 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
4124 "ATC_GPA_MULTIPLE_HITS\n");
4125 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
4126 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
4127 "ATC_RCPL_TO_EMPTY_CNT\n");
4128 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
4129 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
4130 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
4131 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
4132 "ATC_IREQ_LESS_THAN_STU\n");
4133 }
4134
4135 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4136 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
4137 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
4138 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4139 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
4140 }
4141
4142}
4143
4144static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4145{
4146 struct attn_route attn, *group_mask;
4147 int port = BP_PORT(bp);
4148 int index;
4149 u32 reg_addr;
4150 u32 val;
4151 u32 aeu_mask;
4152 bool global = false;
4153
4154 /* need to take HW lock because MCP or other port might also
4155 try to handle this event */
4156 bnx2x_acquire_alr(bp);
4157
4158 if (bnx2x_chk_parity_attn(bp, &global, true)) {
4159#ifndef BNX2X_STOP_ON_ERROR
4160 bp->recovery_state = BNX2X_RECOVERY_INIT;
4161 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4162 /* Disable HW interrupts */
4163 bnx2x_int_disable(bp);
4164 /* In case of parity errors don't handle attentions so that
4165		 * other functions would "see" parity errors.
4166 */
4167#else
4168 bnx2x_panic();
4169#endif
4170 bnx2x_release_alr(bp);
4171 return;
4172 }
4173
4174 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4175 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4176 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4177 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4178 if (!CHIP_IS_E1x(bp))
4179 attn.sig[4] =
4180 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
4181 else
4182 attn.sig[4] = 0;
4183
4184 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
4185 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
4186
4187 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4188 if (deasserted & (1 << index)) {
4189 group_mask = &bp->attn_group[index];
4190
4191 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
4192 "%08x %08x %08x\n",
4193 index,
4194 group_mask->sig[0], group_mask->sig[1],
4195 group_mask->sig[2], group_mask->sig[3],
4196 group_mask->sig[4]);
4197
4198 bnx2x_attn_int_deasserted4(bp,
4199 attn.sig[4] & group_mask->sig[4]);
4200 bnx2x_attn_int_deasserted3(bp,
4201 attn.sig[3] & group_mask->sig[3]);
4202 bnx2x_attn_int_deasserted1(bp,
4203 attn.sig[1] & group_mask->sig[1]);
4204 bnx2x_attn_int_deasserted2(bp,
4205 attn.sig[2] & group_mask->sig[2]);
4206 bnx2x_attn_int_deasserted0(bp,
4207 attn.sig[0] & group_mask->sig[0]);
4208 }
4209 }
4210
4211 bnx2x_release_alr(bp);
4212
4213 if (bp->common.int_block == INT_BLOCK_HC)
4214 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4215 COMMAND_REG_ATTN_BITS_CLR);
4216 else
4217 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
4218
4219 val = ~deasserted;
4220 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
4221 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4222 REG_WR(bp, reg_addr, val);
4223
4224 if (~bp->attn_state & deasserted)
4225 BNX2X_ERR("IGU ERROR\n");
4226
4227 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4228 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4229
4230 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4231 aeu_mask = REG_RD(bp, reg_addr);
4232
4233 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
4234 aeu_mask, deasserted);
4235 aeu_mask |= (deasserted & 0x3ff);
4236 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4237
4238 REG_WR(bp, reg_addr, aeu_mask);
4239 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4240
4241 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4242 bp->attn_state &= ~deasserted;
4243 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4244}
4245
4246static void bnx2x_attn_int(struct bnx2x *bp)
4247{
4248 /* read local copy of bits */
4249 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
4250 attn_bits);
4251 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
4252 attn_bits_ack);
4253 u32 attn_state = bp->attn_state;
4254
4255 /* look for changed bits */
4256 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4257 u32 deasserted = ~attn_bits & attn_ack & attn_state;
4258
4259 DP(NETIF_MSG_HW,
4260 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4261 attn_bits, attn_ack, asserted, deasserted);
4262
4263 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4264 BNX2X_ERR("BAD attention state\n");
4265
4266 /* handle bits that were raised */
4267 if (asserted)
4268 bnx2x_attn_int_asserted(bp, asserted);
4269
4270 if (deasserted)
4271 bnx2x_attn_int_deasserted(bp, deasserted);
4272}
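
/* Worked example for the masks above (editorial addition): with
 * attn_bits = 0b101, attn_ack = 0b001 and attn_state = 0b001,
 *
 *	asserted   = 0b101 & ~0b001 & ~0b001 = 0b100
 *	deasserted = ~0b101 & 0b001 & 0b001 = 0b000
 *
 * i.e. bit 2 was newly raised and no bit was lowered.
 */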
4273
4274void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
4275 u16 index, u8 op, u8 update)
4276{
4277 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
4278
4279 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
4280 igu_addr);
4281}
4282
4283static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
4284{
4285 /* No memory barriers */
4286 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
4287 mmiowb(); /* keep prod updates ordered */
4288}
4289
4290#ifdef BCM_CNIC
4291static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4292 union event_ring_elem *elem)
4293{
4294 u8 err = elem->message.error;
4295
4296 if (!bp->cnic_eth_dev.starting_cid ||
4297 (cid < bp->cnic_eth_dev.starting_cid &&
4298 cid != bp->cnic_eth_dev.iscsi_l2_cid))
4299 return 1;
4300
4301 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
4302
4303 if (unlikely(err)) {
4304
4305 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
4306 cid);
4307 bnx2x_panic_dump(bp);
4308 }
4309 bnx2x_cnic_cfc_comp(bp, cid, err);
4310 return 0;
4311}
4312#endif
4313
4314static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4315{
4316 struct bnx2x_mcast_ramrod_params rparam;
4317 int rc;
4318
4319 memset(&rparam, 0, sizeof(rparam));
4320
4321 rparam.mcast_obj = &bp->mcast_obj;
4322
4323 netif_addr_lock_bh(bp->dev);
4324
4325 /* Clear pending state for the last command */
4326 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
4327
4328 /* If there are pending mcast commands - send them */
4329 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
4330 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
4331 if (rc < 0)
4332 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
4333 rc);
4334 }
4335
4336 netif_addr_unlock_bh(bp->dev);
4337}
4338
4339static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4340 union event_ring_elem *elem)
4341{
4342 unsigned long ramrod_flags = 0;
4343 int rc = 0;
4344 u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
4345 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
4346
4347 /* Always push next commands out, don't wait here */
4348 __set_bit(RAMROD_CONT, &ramrod_flags);
4349
4350 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
4351 case BNX2X_FILTER_MAC_PENDING:
4352#ifdef BCM_CNIC
4353 if (cid == BNX2X_ISCSI_ETH_CID)
4354 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
4355 else
4356#endif
4357 vlan_mac_obj = &bp->fp[cid].mac_obj;
4358
4359 break;
4361
4362 case BNX2X_FILTER_MCAST_PENDING:
4363 /* This is only relevant for 57710 where multicast MACs are
4364 * configured as unicast MACs using the same ramrod.
4365 */
4366 bnx2x_handle_mcast_eqe(bp);
4367 return;
4368 default:
4369 BNX2X_ERR("Unsupported classification command: %d\n",
4370 elem->message.data.eth_event.echo);
4371 return;
4372 }
4373
4374 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
4375
4376 if (rc < 0)
4377 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
4378 else if (rc > 0)
4379 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
4380
4381}
4382
4383#ifdef BCM_CNIC
4384static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
4385#endif
4386
4387static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4388{
4389 netif_addr_lock_bh(bp->dev);
4390
4391 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
4392
4393 /* Send rx_mode command again if was requested */
4394 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
4395 bnx2x_set_storm_rx_mode(bp);
4396#ifdef BCM_CNIC
4397 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
4398 &bp->sp_state))
4399 bnx2x_set_iscsi_eth_rx_mode(bp, true);
4400 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
4401 &bp->sp_state))
4402 bnx2x_set_iscsi_eth_rx_mode(bp, false);
4403#endif
4404
4405 netif_addr_unlock_bh(bp->dev);
4406}
4407
4408static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4409 struct bnx2x *bp, u32 cid)
4410{
4411	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
4412#ifdef BCM_CNIC
4413 if (cid == BNX2X_FCOE_ETH_CID)
4414 return &bnx2x_fcoe(bp, q_obj);
4415 else
4416#endif
4417 return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
4418}
4419
4420static void bnx2x_eq_int(struct bnx2x *bp)
4421{
4422 u16 hw_cons, sw_cons, sw_prod;
4423 union event_ring_elem *elem;
4424 u32 cid;
4425 u8 opcode;
4426 int spqe_cnt = 0;
4427 struct bnx2x_queue_sp_obj *q_obj;
4428 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
4429 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
4430
4431 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
4432
4433	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
4434	 * When we get the next-page we need to adjust so the loop
4435	 * condition below will be met. The next element is the size of a
4436	 * regular element and hence we increment by 1.
4437 */
4438 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
4439 hw_cons++;
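	/* Concretely (with the 256-descriptor pages the ranges above imply,
	 * where the last slot of each page holds the next-page pointer):
	 * sw_cons skips the value 255 and steps 254 -> 256, so a raw hw_cons
	 * of 255 could never satisfy the loop condition below; bumping it by
	 * one regular element lines it up with a value sw_cons can reach.
	 */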
4440
4441 /* This function may never run in parallel with itself for a
4442 	 * specific bp, thus there is no need for a "paired" read memory
4443 * barrier here.
4444 */
4445 sw_cons = bp->eq_cons;
4446 sw_prod = bp->eq_prod;
4447
4448 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
4449 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
4450
4451 for (; sw_cons != hw_cons;
4452 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
4453 
4455 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
4456
4457 cid = SW_CID(elem->message.data.cfc_del_event.cid);
4458 opcode = elem->message.opcode;
4459 
4461 /* handle eq element */
4462 switch (opcode) {
4463 case EVENT_RING_OPCODE_STAT_QUERY:
4464 DP(NETIF_MSG_TIMER, "got statistics comp event %d\n",
4465 bp->stats_comp++);
4466 /* nothing to do with stats comp */
4467 goto next_spqe;
4468
4469 case EVENT_RING_OPCODE_CFC_DEL:
4470 /* handle according to cid range */
4471 /*
4472 * we may want to verify here that the bp state is
4473 * HALTING
4474 */
4475 DP(BNX2X_MSG_SP,
4476 "got delete ramrod for MULTI[%d]\n", cid);
4477#ifdef BCM_CNIC
4478 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
4479 goto next_spqe;
4480#endif
4481 q_obj = bnx2x_cid_to_q_obj(bp, cid);
4482
4483 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
4484 break;
4485 
4488 goto next_spqe;
4489
4490 case EVENT_RING_OPCODE_STOP_TRAFFIC:
4491 DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n");
4492 if (f_obj->complete_cmd(bp, f_obj,
4493 BNX2X_F_CMD_TX_STOP))
4494 break;
4495 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
4496 goto next_spqe;
4497
4498 case EVENT_RING_OPCODE_START_TRAFFIC:
4499 DP(BNX2X_MSG_SP, "got START TRAFFIC\n");
4500 if (f_obj->complete_cmd(bp, f_obj,
4501 BNX2X_F_CMD_TX_START))
4502 break;
4503 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
4504 goto next_spqe;
4505 case EVENT_RING_OPCODE_FUNCTION_START:
4506 DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n");
4507 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
4508 break;
4509
4510 goto next_spqe;
4511
4512 case EVENT_RING_OPCODE_FUNCTION_STOP:
4513 DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n");
4514 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
4515 break;
4516
4517 goto next_spqe;
4518 }
4519
4520 switch (opcode | bp->state) {
4521 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
4522 BNX2X_STATE_OPEN):
4523 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
4524 BNX2X_STATE_OPENING_WAIT4_PORT):
4525 cid = elem->message.data.eth_event.echo &
4526 BNX2X_SWCID_MASK;
4527 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
4528 cid);
4529 rss_raw->clear_pending(rss_raw);
4530 break;
4531
4532 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
4533 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
4534 case (EVENT_RING_OPCODE_SET_MAC |
4535 BNX2X_STATE_CLOSING_WAIT4_HALT):
4536 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
4537 BNX2X_STATE_OPEN):
4538 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
4539 BNX2X_STATE_DIAG):
4540 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
4541 BNX2X_STATE_CLOSING_WAIT4_HALT):
4542 DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
4543 bnx2x_handle_classification_eqe(bp, elem);
4544 break;
4545
4546 case (EVENT_RING_OPCODE_MULTICAST_RULES |
4547 BNX2X_STATE_OPEN):
4548 case (EVENT_RING_OPCODE_MULTICAST_RULES |
4549 BNX2X_STATE_DIAG):
4550 case (EVENT_RING_OPCODE_MULTICAST_RULES |
4551 BNX2X_STATE_CLOSING_WAIT4_HALT):
4552 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
4553 bnx2x_handle_mcast_eqe(bp);
4554 break;
4555
4556 case (EVENT_RING_OPCODE_FILTERS_RULES |
4557 BNX2X_STATE_OPEN):
4558 case (EVENT_RING_OPCODE_FILTERS_RULES |
4559 BNX2X_STATE_DIAG):
4560 case (EVENT_RING_OPCODE_FILTERS_RULES |
4561 BNX2X_STATE_CLOSING_WAIT4_HALT):
4562 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
4563 bnx2x_handle_rx_mode_eqe(bp);
4564 break;
4565 default:
4566 			/* unknown event - log error and continue */
4567 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
4568 elem->message.opcode, bp->state);
4569 }
4570next_spqe:
4571 spqe_cnt++;
4572 } /* for */
4573
4574 smp_mb__before_atomic_inc();
4575 atomic_add(spqe_cnt, &bp->eq_spq_left);
4576
4577 bp->eq_cons = sw_cons;
4578 bp->eq_prod = sw_prod;
4579 	/* Make sure the above memory writes complete before updating the producer */
4580 smp_wmb();
4581
4582 /* update producer */
4583 bnx2x_update_eq_prod(bp, bp->eq_prod);
4584}
4585
4586static void bnx2x_sp_task(struct work_struct *work)
4587{
4588 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
4589 u16 status;
4590
4591 status = bnx2x_update_dsb_idx(bp);
4592/* if (status == 0) */
4593/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
4594
4595 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
4596
4597 /* HW attentions */
4598 if (status & BNX2X_DEF_SB_ATT_IDX) {
4599 bnx2x_attn_int(bp);
4600 status &= ~BNX2X_DEF_SB_ATT_IDX;
4601 }
4602
4603 /* SP events: STAT_QUERY and others */
4604 if (status & BNX2X_DEF_SB_IDX) {
4605#ifdef BCM_CNIC
4606 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
4607
4608 if ((!NO_FCOE(bp)) &&
4609 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
4610 /*
4611 * Prevent local bottom-halves from running as
4612 * we are going to change the local NAPI list.
4613 */
4614 local_bh_disable();
4615 napi_schedule(&bnx2x_fcoe(bp, napi));
4616 local_bh_enable();
4617 }
4618#endif
4619 /* Handle EQ completions */
4620 bnx2x_eq_int(bp);
4621
4622 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
4623 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
4624
4625 status &= ~BNX2X_DEF_SB_IDX;
4626 }
4627
4628 if (unlikely(status))
4629 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
4630 status);
4631
4632 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
4633 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
4634}
4635
4636irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
4637{
4638 struct net_device *dev = dev_instance;
4639 struct bnx2x *bp = netdev_priv(dev);
4640
4641 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
4642 IGU_INT_DISABLE, 0);
4643
4644#ifdef BNX2X_STOP_ON_ERROR
4645 if (unlikely(bp->panic))
4646 return IRQ_HANDLED;
4647#endif
4648
4649#ifdef BCM_CNIC
4650 {
4651 struct cnic_ops *c_ops;
4652
4653 rcu_read_lock();
4654 c_ops = rcu_dereference(bp->cnic_ops);
4655 if (c_ops)
4656 c_ops->cnic_handler(bp->cnic_data, NULL);
4657 rcu_read_unlock();
4658 }
4659#endif
4660 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
4661
4662 return IRQ_HANDLED;
4663}
4664
4665/* end of slow path */
4666
4667
4668void bnx2x_drv_pulse(struct bnx2x *bp)
4669{
4670 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
4671 bp->fw_drv_pulse_wr_seq);
4672}
4673
4674
4675static void bnx2x_timer(unsigned long data)
4676{
4677 u8 cos;
4678 struct bnx2x *bp = (struct bnx2x *) data;
4679
4680 if (!netif_running(bp->dev))
4681 return;
4682
4683 if (poll) {
4684 struct bnx2x_fastpath *fp = &bp->fp[0];
4685
4686 for_each_cos_in_tx_queue(fp, cos)
4687 bnx2x_tx_int(bp, &fp->txdata[cos]);
4688 bnx2x_rx_int(fp, 1000);
4689 }
4690
4691 if (!BP_NOMCP(bp)) {
4692 int mb_idx = BP_FW_MB_IDX(bp);
4693 u32 drv_pulse;
4694 u32 mcp_pulse;
4695
4696 ++bp->fw_drv_pulse_wr_seq;
4697 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4698 /* TBD - add SYSTEM_TIME */
4699 drv_pulse = bp->fw_drv_pulse_wr_seq;
4700 bnx2x_drv_pulse(bp);
4701
4702 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
4703 MCP_PULSE_SEQ_MASK);
4704 /* The delta between driver pulse and mcp response
4705 * should be 1 (before mcp response) or 0 (after mcp response)
4706 */
4707 if ((drv_pulse != mcp_pulse) &&
4708 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4709 /* someone lost a heartbeat... */
4710 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4711 drv_pulse, mcp_pulse);
4712 }
4713 }
4714
4715 if (bp->state == BNX2X_STATE_OPEN)
4716 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4717
4718 mod_timer(&bp->timer, jiffies + bp->current_interval);
4719}
4720
4721/* end of Statistics */
4722
4723/* nic init */
4724
4725/*
4726 * nic init service functions
4727 */
4728
4729static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
4730{
4731 u32 i;
4732 if (!(len%4) && !(addr%4))
4733 for (i = 0; i < len; i += 4)
4734 REG_WR(bp, addr + i, fill);
4735 else
4736 for (i = 0; i < len; i++)
4737 REG_WR8(bp, addr + i, fill);
4738
4739}
4740
4741/* helper: writes FP SP data to FW - data_size in dwords */
4742static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
4743 int fw_sb_id,
4744 u32 *sb_data_p,
4745 u32 data_size)
4746{
4747 int index;
4748 for (index = 0; index < data_size; index++)
4749 REG_WR(bp, BAR_CSTRORM_INTMEM +
4750 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
4751 sizeof(u32)*index,
4752 *(sb_data_p + index));
4753}
4754
4755static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
4756{
4757 u32 *sb_data_p;
4758 u32 data_size = 0;
4759 struct hc_status_block_data_e2 sb_data_e2;
4760 struct hc_status_block_data_e1x sb_data_e1x;
4761
4762 /* disable the function first */
4763 if (!CHIP_IS_E1x(bp)) {
4764 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4765 sb_data_e2.common.state = SB_DISABLED;
4766 sb_data_e2.common.p_func.vf_valid = false;
4767 sb_data_p = (u32 *)&sb_data_e2;
4768 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4769 } else {
4770 memset(&sb_data_e1x, 0,
4771 sizeof(struct hc_status_block_data_e1x));
4772 sb_data_e1x.common.state = SB_DISABLED;
4773 sb_data_e1x.common.p_func.vf_valid = false;
4774 sb_data_p = (u32 *)&sb_data_e1x;
4775 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4776 }
4777 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4778
4779 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4780 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
4781 CSTORM_STATUS_BLOCK_SIZE);
4782 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4783 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
4784 CSTORM_SYNC_BLOCK_SIZE);
4785}
4786
4787/* helper: writes SP SB data to FW */
4788static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
4789 struct hc_sp_status_block_data *sp_sb_data)
4790{
4791 int func = BP_FUNC(bp);
4792 int i;
4793 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
4794 REG_WR(bp, BAR_CSTRORM_INTMEM +
4795 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
4796 i*sizeof(u32),
4797 *((u32 *)sp_sb_data + i));
4798}
4799
4800static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
4801{
4802 int func = BP_FUNC(bp);
4803 struct hc_sp_status_block_data sp_sb_data;
4804 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4805
4806 sp_sb_data.state = SB_DISABLED;
4807 sp_sb_data.p_func.vf_valid = false;
4808
4809 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4810
4811 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4812 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
4813 CSTORM_SP_STATUS_BLOCK_SIZE);
4814 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4815 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4816 CSTORM_SP_SYNC_BLOCK_SIZE);
4817
4818}
4819
4820
4821static inline
4822void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4823 int igu_sb_id, int igu_seg_id)
4824{
4825 hc_sm->igu_sb_id = igu_sb_id;
4826 hc_sm->igu_seg_id = igu_seg_id;
4827 hc_sm->timer_value = 0xFF;
4828 hc_sm->time_to_expire = 0xFFFFFFFF;
4829}
4830
4831
4832 /* maps state machine ids onto the status block indices */
4833static inline
4834void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
4835{
4836 /* zero out state machine indices */
4837 /* rx indices */
4838 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
4839
4840 /* tx indices */
4841 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
4842 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
4843 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
4844 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
4845
4846 /* map indices */
4847 /* rx indices */
4848 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
4849 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4850
4851 /* tx indices */
4852 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
4853 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4854 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
4855 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4856 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
4857 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4858 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
4859 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4860}
4861
4862static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4863 u8 vf_valid, int fw_sb_id, int igu_sb_id)
4864{
4865 int igu_seg_id;
4866
4867 struct hc_status_block_data_e2 sb_data_e2;
4868 struct hc_status_block_data_e1x sb_data_e1x;
4869 struct hc_status_block_sm *hc_sm_p;
4870 int data_size;
4871 u32 *sb_data_p;
4872
4873 if (CHIP_INT_MODE_IS_BC(bp))
4874 igu_seg_id = HC_SEG_ACCESS_NORM;
4875 else
4876 igu_seg_id = IGU_SEG_ACCESS_NORM;
4877
4878 bnx2x_zero_fp_sb(bp, fw_sb_id);
4879
4880 if (!CHIP_IS_E1x(bp)) {
4881 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4882 sb_data_e2.common.state = SB_ENABLED;
4883 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
4884 sb_data_e2.common.p_func.vf_id = vfid;
4885 sb_data_e2.common.p_func.vf_valid = vf_valid;
4886 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
4887 sb_data_e2.common.same_igu_sb_1b = true;
4888 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4889 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4890 hc_sm_p = sb_data_e2.common.state_machine;
4891 sb_data_p = (u32 *)&sb_data_e2;
4892 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4893 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
4894 } else {
4895 memset(&sb_data_e1x, 0,
4896 sizeof(struct hc_status_block_data_e1x));
4897 sb_data_e1x.common.state = SB_ENABLED;
4898 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
4899 sb_data_e1x.common.p_func.vf_id = 0xff;
4900 sb_data_e1x.common.p_func.vf_valid = false;
4901 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
4902 sb_data_e1x.common.same_igu_sb_1b = true;
4903 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4904 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4905 hc_sm_p = sb_data_e1x.common.state_machine;
4906 sb_data_p = (u32 *)&sb_data_e1x;
4907 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4908 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
4909 }
4910
4911 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
4912 igu_sb_id, igu_seg_id);
4913 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
4914 igu_sb_id, igu_seg_id);
4915
4916 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
4917
4918 	/* write indices to HW */
4919 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4920}
4921
4922static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
4923 u16 tx_usec, u16 rx_usec)
4924{
4925 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
4926 false, rx_usec);
4927 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
4928 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
4929 tx_usec);
4930 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
4931 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
4932 tx_usec);
4933 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
4934 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
4935 tx_usec);
4936}
4937
4938static void bnx2x_init_def_sb(struct bnx2x *bp)
4939{
4940 struct host_sp_status_block *def_sb = bp->def_status_blk;
4941 dma_addr_t mapping = bp->def_status_blk_mapping;
4942 int igu_sp_sb_index;
4943 int igu_seg_id;
4944 int port = BP_PORT(bp);
4945 int func = BP_FUNC(bp);
4946 int reg_offset, reg_offset_en5;
4947 u64 section;
4948 int index;
4949 struct hc_sp_status_block_data sp_sb_data;
4950 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4951
4952 if (CHIP_INT_MODE_IS_BC(bp)) {
4953 igu_sp_sb_index = DEF_SB_IGU_ID;
4954 igu_seg_id = HC_SEG_ACCESS_DEF;
4955 } else {
4956 igu_sp_sb_index = bp->igu_dsb_id;
4957 igu_seg_id = IGU_SEG_ACCESS_DEF;
4958 }
4959
4960 /* ATTN */
4961 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4962 atten_status_block);
4963 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
4964
4965 bp->attn_state = 0;
4966
4967 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4968 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4969 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
4970 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
4971 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4972 int sindex;
4973 /* take care of sig[0]..sig[4] */
4974 for (sindex = 0; sindex < 4; sindex++)
4975 bp->attn_group[index].sig[sindex] =
4976 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4977
4978 if (!CHIP_IS_E1x(bp))
4979 /*
4980 * enable5 is separate from the rest of the registers,
4981 * and therefore the address skip is 4
4982 * and not 16 between the different groups
4983 */
4984 bp->attn_group[index].sig[4] = REG_RD(bp,
4985 reg_offset_en5 + 0x4*index);
4986 else
4987 bp->attn_group[index].sig[4] = 0;
4988 }
4989
4990 if (bp->common.int_block == INT_BLOCK_HC) {
4991 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4992 HC_REG_ATTN_MSG0_ADDR_L);
4993
4994 REG_WR(bp, reg_offset, U64_LO(section));
4995 REG_WR(bp, reg_offset + 4, U64_HI(section));
4996 } else if (!CHIP_IS_E1x(bp)) {
4997 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4998 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4999 }
5000
5001 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5002 sp_sb);
5003
5004 bnx2x_zero_sp_sb(bp);
5005
5006 sp_sb_data.state = SB_ENABLED;
5007 sp_sb_data.host_sb_addr.lo = U64_LO(section);
5008 sp_sb_data.host_sb_addr.hi = U64_HI(section);
5009 sp_sb_data.igu_sb_id = igu_sp_sb_index;
5010 sp_sb_data.igu_seg_id = igu_seg_id;
5011 sp_sb_data.p_func.pf_id = func;
5012 sp_sb_data.p_func.vnic_id = BP_VN(bp);
5013 sp_sb_data.p_func.vf_id = 0xff;
5014
5015 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5016
5017 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
5018}
5019
5020void bnx2x_update_coalesce(struct bnx2x *bp)
5021{
5022 int i;
5023
5024 for_each_eth_queue(bp, i)
5025 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
5026 bp->tx_ticks, bp->rx_ticks);
5027}
5028
5029static void bnx2x_init_sp_ring(struct bnx2x *bp)
5030{
5031 spin_lock_init(&bp->spq_lock);
5032 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
5033
5034 bp->spq_prod_idx = 0;
5035 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5036 bp->spq_prod_bd = bp->spq;
5037 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5038}
5039
5040static void bnx2x_init_eq_ring(struct bnx2x *bp)
5041{
5042 int i;
5043 for (i = 1; i <= NUM_EQ_PAGES; i++) {
5044 union event_ring_elem *elem =
5045 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
5046
5047 elem->next_page.addr.hi =
5048 cpu_to_le32(U64_HI(bp->eq_mapping +
5049 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
5050 elem->next_page.addr.lo =
5051 cpu_to_le32(U64_LO(bp->eq_mapping +
5052 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
5053 }
5054 bp->eq_cons = 0;
5055 bp->eq_prod = NUM_EQ_DESC;
5056 bp->eq_cons_sb = BNX2X_EQ_INDEX;
5057 	/* we want a warning message before it gets rough... */
5058 atomic_set(&bp->eq_spq_left,
5059 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
5060}
5061
5062
5063/* called with netif_addr_lock_bh() */
5064void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
5065 unsigned long rx_mode_flags,
5066 unsigned long rx_accept_flags,
5067 unsigned long tx_accept_flags,
5068 unsigned long ramrod_flags)
5069{
5070 struct bnx2x_rx_mode_ramrod_params ramrod_param;
5071 int rc;
5072
5073 memset(&ramrod_param, 0, sizeof(ramrod_param));
5074
5075 /* Prepare ramrod parameters */
5076 ramrod_param.cid = 0;
5077 ramrod_param.cl_id = cl_id;
5078 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
5079 ramrod_param.func_id = BP_FUNC(bp);
5080
5081 ramrod_param.pstate = &bp->sp_state;
5082 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
5083
5084 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
5085 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
5086
5087 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5088
5089 ramrod_param.ramrod_flags = ramrod_flags;
5090 ramrod_param.rx_mode_flags = rx_mode_flags;
5091
5092 ramrod_param.rx_accept_flags = rx_accept_flags;
5093 ramrod_param.tx_accept_flags = tx_accept_flags;
5094
5095 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
5096 if (rc < 0) {
5097 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
5098 return;
5099 }
5100}
5101
5102/* called with netif_addr_lock_bh() */
5103void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5104{
5105 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5106 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5107
5108#ifdef BCM_CNIC
5109 if (!NO_FCOE(bp))
5111 /* Configure rx_mode of FCoE Queue */
5112 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5113#endif
5114
5115 switch (bp->rx_mode) {
5116 case BNX2X_RX_MODE_NONE:
5117 /*
5118 * 'drop all' supersedes any accept flags that may have been
5119 * passed to the function.
5120 */
5121 break;
5122 case BNX2X_RX_MODE_NORMAL:
5123 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5124 __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
5125 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5126
5127 /* internal switching mode */
5128 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5129 __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
5130 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5131
5132 break;
5133 case BNX2X_RX_MODE_ALLMULTI:
5134 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5135 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
5136 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5137
5138 /* internal switching mode */
5139 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5140 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
5141 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5142
5143 break;
5144 case BNX2X_RX_MODE_PROMISC:
5145 		/* According to the definition of SI mode, an iface in promisc mode
5146 * should receive matched and unmatched (in resolution of port)
5147 * unicast packets.
5148 */
5149 __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
5150 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5151 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
5152 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5153
5154 /* internal switching mode */
5155 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
5156 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5157
5158 if (IS_MF_SI(bp))
5159 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
5160 else
5161 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5162
5163 break;
5164 default:
5165 BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
5166 return;
5167 }
5168
5169 if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
5170 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
5171 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
5172 }
5173
5174 __set_bit(RAMROD_RX, &ramrod_flags);
5175 __set_bit(RAMROD_TX, &ramrod_flags);
5176
5177 bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
5178 tx_accept_flags, ramrod_flags);
5179}
5180
5181static void bnx2x_init_internal_common(struct bnx2x *bp)
5182{
5183 int i;
5184
5185 if (IS_MF_SI(bp))
5186 /*
5187 * In switch independent mode, the TSTORM needs to accept
5188 * packets that failed classification, since approximate match
5189 * mac addresses aren't written to NIG LLH
5190 */
5191 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5192 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
5193 else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
5194 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5195 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
5196
5197 /* Zero this manually as its initialization is
5198 currently missing in the initTool */
5199 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5200 REG_WR(bp, BAR_USTRORM_INTMEM +
5201 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5202 if (!CHIP_IS_E1x(bp)) {
5203 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
5204 CHIP_INT_MODE_IS_BC(bp) ?
5205 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
5206 }
5207}
5208
5209static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5210{
5211 switch (load_code) {
5212 case FW_MSG_CODE_DRV_LOAD_COMMON:
5213 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5214 bnx2x_init_internal_common(bp);
5215 /* no break */
5216
5217 case FW_MSG_CODE_DRV_LOAD_PORT:
5218 /* nothing to do */
5219 /* no break */
5220
5221 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5222 /* internal memory per function is
5223 initialized inside bnx2x_pf_init */
5224 break;
5225
5226 default:
5227 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5228 break;
5229 }
5230}
5231
5232static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
5233{
5234 return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
5235}
5236
5237static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
5238{
5239 return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
5240}
5241
5242static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
5243{
5244 if (CHIP_IS_E1x(fp->bp))
5245 return BP_L_ID(fp->bp) + fp->index;
5246 else /* We want Client ID to be the same as IGU SB ID for 57712 */
5247 return bnx2x_fp_igu_sb_id(fp);
5248}
5249
5250static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
5251{
5252 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
5253 u8 cos;
5254 unsigned long q_type = 0;
5255 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
5256
5257 fp->cid = fp_idx;
5258 fp->cl_id = bnx2x_fp_cl_id(fp);
5259 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
5260 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
5261 	/* qZone id equals the FW (per path) client id */
5262 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
5263
5264 /* init shortcut */
5265 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
5266 	/* Set up SB indices */
5267 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5268
5269 /* Configure Queue State object */
5270 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
5271 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
5272
5273 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
5274
5275 /* init tx data */
5276 for_each_cos_in_tx_queue(fp, cos) {
5277 bnx2x_init_txdata(bp, &fp->txdata[cos],
5278 CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
5279 FP_COS_TO_TXQ(fp, cos),
5280 BNX2X_TX_SB_INDEX_BASE + cos);
5281 cids[cos] = fp->txdata[cos].cid;
5282 }
5283
5284 bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
5285 BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
5286 bnx2x_sp_mapping(bp, q_rdata), q_type);
5287
5288 	/* Configure classification DBs: Always enable Tx switching */
5291 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
5292
5293 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
5294 "cl_id %d fw_sb %d igu_sb %d\n",
5295 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
5296 fp->igu_sb_id);
5297 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
5298 fp->fw_sb_id, fp->igu_sb_id);
5299
5300 bnx2x_update_fpsb_idx(fp);
5301}
5302
5303void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5304{
5305 int i;
5306
5307 for_each_eth_queue(bp, i)
5308 bnx2x_init_eth_fp(bp, i);
5309#ifdef BCM_CNIC
5310 if (!NO_FCOE(bp))
5311 bnx2x_init_fcoe_fp(bp);
5312
5313 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
5314 BNX2X_VF_ID_INVALID, false,
5315 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
5316
5317#endif
5318
5319 /* Initialize MOD_ABS interrupts */
5320 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
5321 bp->common.shmem_base, bp->common.shmem2_base,
5322 BP_PORT(bp));
5323 /* ensure status block indices were read */
5324 rmb();
5325
5326 bnx2x_init_def_sb(bp);
5327 bnx2x_update_dsb_idx(bp);
5328 bnx2x_init_rx_rings(bp);
5329 bnx2x_init_tx_rings(bp);
5330 bnx2x_init_sp_ring(bp);
5331 bnx2x_init_eq_ring(bp);
5332 bnx2x_init_internal(bp, load_code);
5333 bnx2x_pf_init(bp);
5334 bnx2x_stats_init(bp);
5335
5336 /* flush all before enabling interrupts */
5337 mb();
5338 mmiowb();
5339
5340 bnx2x_int_enable(bp);
5341
5342 /* Check for SPIO5 */
5343 bnx2x_attn_int_deasserted0(bp,
5344 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5345 AEU_INPUTS_ATTN_BITS_SPIO5);
5346}
5347
5348/* end of nic init */
5349
5350/*
5351 * gzip service functions
5352 */
5353
5354static int bnx2x_gunzip_init(struct bnx2x *bp)
5355{
5356 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
5357 &bp->gunzip_mapping, GFP_KERNEL);
5358 if (bp->gunzip_buf == NULL)
5359 goto gunzip_nomem1;
5360
5361 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5362 if (bp->strm == NULL)
5363 goto gunzip_nomem2;
5364
5365 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
5366 if (bp->strm->workspace == NULL)
5367 goto gunzip_nomem3;
5368
5369 return 0;
5370
5371gunzip_nomem3:
5372 kfree(bp->strm);
5373 bp->strm = NULL;
5374
5375gunzip_nomem2:
5376 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5377 bp->gunzip_mapping);
5378 bp->gunzip_buf = NULL;
5379
5380gunzip_nomem1:
5381 	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
5382 		   " decompression\n");
5383 return -ENOMEM;
5384}
5385
5386static void bnx2x_gunzip_end(struct bnx2x *bp)
5387{
5388 if (bp->strm) {
5389 vfree(bp->strm->workspace);
5390 kfree(bp->strm);
5391 bp->strm = NULL;
5392 }
5393
5394 if (bp->gunzip_buf) {
5395 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5396 bp->gunzip_mapping);
5397 bp->gunzip_buf = NULL;
5398 }
5399}
5400
5401static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5402{
5403 int n, rc;
5404
5405 /* check gzip header */
5406 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5407 BNX2X_ERR("Bad gzip header\n");
5408 return -EINVAL;
5409 }
5410
5411 n = 10;
5412
5413#define FNAME 0x8
5414
5415 if (zbuf[3] & FNAME)
5416 while ((zbuf[n++] != 0) && (n < len));
5417
5418 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5419 bp->strm->avail_in = len - n;
5420 bp->strm->next_out = bp->gunzip_buf;
5421 bp->strm->avail_out = FW_BUF_SIZE;
5422
5423 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5424 if (rc != Z_OK)
5425 return rc;
5426
5427 rc = zlib_inflate(bp->strm, Z_FINISH);
5428 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5429 netdev_err(bp->dev, "Firmware decompression error: %s\n",
5430 bp->strm->msg);
5431
5432 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5433 if (bp->gunzip_outlen & 0x3)
5434 netdev_err(bp->dev, "Firmware decompression error:"
5435 " gunzip_outlen (%d) not aligned\n",
5436 bp->gunzip_outlen);
5437 bp->gunzip_outlen >>= 2;
5438
5439 zlib_inflateEnd(bp->strm);
5440
5441 if (rc == Z_STREAM_END)
5442 return 0;
5443
5444 return rc;
5445}
5446
5447/* nic load/unload */
5448
5449/*
5450 * General service functions
5451 */
5452
5453/* send a NIG loopback debug packet */
5454static void bnx2x_lb_pckt(struct bnx2x *bp)
5455{
5456 u32 wb_write[3];
5457
5458 /* Ethernet source and destination addresses */
5459 wb_write[0] = 0x55555555;
5460 wb_write[1] = 0x55555555;
5461 wb_write[2] = 0x20; /* SOP */
5462 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5463
5464 /* NON-IP protocol */
5465 wb_write[0] = 0x09000000;
5466 wb_write[1] = 0x55555555;
5467 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5468 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5469}
5470
5471 /* Some of the internal memories are not directly readable from the
5472  * driver; to test them we send debug packets.
5473  */
5475static int bnx2x_int_mem_test(struct bnx2x *bp)
5476{
5477 int factor;
5478 int count, i;
5479 u32 val = 0;
5480
5481 if (CHIP_REV_IS_FPGA(bp))
5482 factor = 120;
5483 else if (CHIP_REV_IS_EMUL(bp))
5484 factor = 200;
5485 else
5486 factor = 1;
5487
5488 /* Disable inputs of parser neighbor blocks */
5489 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5490 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5491 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5492 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5493
5494 /* Write 0 to parser credits for CFC search request */
5495 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5496
5497 /* send Ethernet packet */
5498 bnx2x_lb_pckt(bp);
5499
5500 	/* TODO: do I reset the NIG statistics? */
5501 /* Wait until NIG register shows 1 packet of size 0x10 */
5502 count = 1000 * factor;
5503 while (count) {
5504
5505 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5506 val = *bnx2x_sp(bp, wb_data[0]);
5507 if (val == 0x10)
5508 break;
5509
5510 msleep(10);
5511 count--;
5512 }
5513 if (val != 0x10) {
5514 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5515 return -1;
5516 }
5517
5518 /* Wait until PRS register shows 1 packet */
5519 count = 1000 * factor;
5520 while (count) {
5521 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5522 if (val == 1)
5523 break;
5524
5525 msleep(10);
5526 count--;
5527 }
5528 if (val != 0x1) {
5529 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5530 return -2;
5531 }
5532
5533 /* Reset and init BRB, PRS */
5534 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5535 msleep(50);
5536 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5537 msleep(50);
5538 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
5539 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
5540
5541 DP(NETIF_MSG_HW, "part2\n");
5542
5543 /* Disable inputs of parser neighbor blocks */
5544 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5545 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5546 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5547 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5548
5549 /* Write 0 to parser credits for CFC search request */
5550 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5551
5552 /* send 10 Ethernet packets */
5553 for (i = 0; i < 10; i++)
5554 bnx2x_lb_pckt(bp);
5555
5556 /* Wait until NIG register shows 10 + 1
5557 packets of size 11*0x10 = 0xb0 */
5558 count = 1000 * factor;
5559 while (count) {
5560
5561 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5562 val = *bnx2x_sp(bp, wb_data[0]);
5563 if (val == 0xb0)
5564 break;
5565
5566 msleep(10);
5567 count--;
5568 }
5569 if (val != 0xb0) {
5570 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5571 return -3;
5572 }
5573
5574 /* Wait until PRS register shows 2 packets */
5575 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5576 if (val != 2)
5577 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5578
5579 /* Write 1 to parser credits for CFC search request */
5580 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5581
5582 /* Wait until PRS register shows 3 packets */
5583 msleep(10 * factor);
5585 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5586 if (val != 3)
5587 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5588
5589 /* clear NIG EOP FIFO */
5590 for (i = 0; i < 11; i++)
5591 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5592 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5593 if (val != 1) {
5594 BNX2X_ERR("clear of NIG failed\n");
5595 return -4;
5596 }
5597
5598 /* Reset and init BRB, PRS, NIG */
5599 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5600 msleep(50);
5601 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5602 msleep(50);
5603 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
5604 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
5605#ifndef BCM_CNIC
5606 /* set NIC mode */
5607 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5608#endif
5609
5610 /* Enable inputs of parser neighbor blocks */
5611 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5612 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5613 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5614 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5615
5616 DP(NETIF_MSG_HW, "done\n");
5617
5618 return 0; /* OK */
5619}
5620
5621static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
5622{
5623 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5624 if (!CHIP_IS_E1x(bp))
5625 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
5626 else
5627 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5628 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5629 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5630 /*
5631 * mask read length error interrupts in brb for parser
5632 * (parsing unit and 'checksum and crc' unit)
5633 * these errors are legal (PU reads fixed length and CAC can cause
5634 * read length error on truncated packets)
5635 */
5636 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
5637 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5638 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5639 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5640 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5641 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5642/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5643/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5644 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5645 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5646 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5647/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5648/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5649 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5650 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5651 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5652 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5653/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5654/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5655
5656 if (CHIP_REV_IS_FPGA(bp))
5657 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5658 else if (!CHIP_IS_E1x(bp))
5659 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
5660 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
5661 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
5662 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
5663 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
5664 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
5665 else
5666 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5667 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5668 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5669 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5670/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5671
5672 if (!CHIP_IS_E1x(bp))
5673 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
5674 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
5675
5676 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5677 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5678/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5679 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
5680}
5681
5682static void bnx2x_reset_common(struct bnx2x *bp)
5683{
5684 u32 val = 0x1400;
5685
5686 /* reset_common */
5687 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5688 0xd3ffff7f);
5689
5690 if (CHIP_IS_E3(bp)) {
5691 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
5692 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
5693 }
5694
5695 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
5696}
5697
5698static void bnx2x_setup_dmae(struct bnx2x *bp)
5699{
5700 bp->dmae_ready = 0;
5701 spin_lock_init(&bp->dmae_lock);
5702}
5703
5704static void bnx2x_init_pxp(struct bnx2x *bp)
5705{
5706 u16 devctl;
5707 int r_order, w_order;
5708
5709 pci_read_config_word(bp->pdev,
5710 pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
5711 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5712 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5713 if (bp->mrrs == -1)
5714 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5715 else {
5716 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5717 r_order = bp->mrrs;
5718 }
5719
5720 bnx2x_init_pxp_arb(bp, r_order, w_order);
5721}
5722
5723static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5724{
5725 int is_required;
5726 u32 val;
5727 int port;
5728
5729 if (BP_NOMCP(bp))
5730 return;
5731
5732 is_required = 0;
5733 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5734 SHARED_HW_CFG_FAN_FAILURE_MASK;
5735
5736 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5737 is_required = 1;
5738
5739 /*
5740 * The fan failure mechanism is usually related to the PHY type since
5741 * the power consumption of the board is affected by the PHY. Currently,
5742 	 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5743 */
5744 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5745 for (port = PORT_0; port < PORT_MAX; port++) {
5746 is_required |=
5747 bnx2x_fan_failure_det_req(
5748 bp,
5749 bp->common.shmem_base,
5750 bp->common.shmem2_base,
5751 port);
5752 }
5753
5754 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5755
5756 if (is_required == 0)
5757 return;
5758
5759 /* Fan failure is indicated by SPIO 5 */
5760 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5761 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5762
5763 /* set to active low mode */
5764 val = REG_RD(bp, MISC_REG_SPIO_INT);
5765 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5766 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5767 REG_WR(bp, MISC_REG_SPIO_INT, val);
5768
5769 /* enable interrupt to signal the IGU */
5770 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5771 val |= (1 << MISC_REGISTERS_SPIO_5);
5772 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5773}
5774
5775static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
5776{
5777 u32 offset = 0;
5778
5779 if (CHIP_IS_E1(bp))
5780 return;
5781 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
5782 return;
5783
5784 switch (BP_ABS_FUNC(bp)) {
5785 case 0:
5786 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
5787 break;
5788 case 1:
5789 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
5790 break;
5791 case 2:
5792 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
5793 break;
5794 case 3:
5795 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
5796 break;
5797 case 4:
5798 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
5799 break;
5800 case 5:
5801 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
5802 break;
5803 case 6:
5804 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
5805 break;
5806 case 7:
5807 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
5808 break;
5809 default:
5810 return;
5811 }
5812
5813 REG_WR(bp, offset, pretend_func_num);
5814 REG_RD(bp, offset);
5815 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
5816}
5817
5818void bnx2x_pf_disable(struct bnx2x *bp)
5819{
5820 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
5821 val &= ~IGU_PF_CONF_FUNC_EN;
5822
5823 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
5824 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
5825 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
5826}
5827
5828static inline void bnx2x__common_init_phy(struct bnx2x *bp)
5829{
5830 u32 shmem_base[2], shmem2_base[2];
5831 shmem_base[0] = bp->common.shmem_base;
5832 shmem2_base[0] = bp->common.shmem2_base;
5833 if (!CHIP_IS_E1x(bp)) {
5834 shmem_base[1] =
5835 SHMEM2_RD(bp, other_shmem_base_addr);
5836 shmem2_base[1] =
5837 SHMEM2_RD(bp, other_shmem2_base_addr);
5838 }
5839 bnx2x_acquire_phy_lock(bp);
5840 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5841 bp->common.chip_id);
5842 bnx2x_release_phy_lock(bp);
5843}
5844
5845/**
5846 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
5847 *
5848 * @bp: driver handle
5849 */
5850static int bnx2x_init_hw_common(struct bnx2x *bp)
5851{
5852 u32 val;
5853
5854 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
5855
5856 /*
5857 * take the UNDI lock to protect undi_unload flow from accessing
5858 * registers while we're resetting the chip
5859 */
5860 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
5861
5862 bnx2x_reset_common(bp);
5863 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5864
5865 val = 0xfffc;
5866 if (CHIP_IS_E3(bp)) {
5867 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
5868 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
5869 }
5870 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
5871
5872 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
5873
5874 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
5875
5876 if (!CHIP_IS_E1x(bp)) {
5877 u8 abs_func_id;
5878
5879 		/*
5880 		 * In 4-port or 2-port mode we need to turn off master-enable
5881 		 * for everyone; after that, turn it back on for self. So we
5882 		 * disregard multi-function or not, and always disable it for
5883 		 * all functions on the given path, which means 0,2,4,6 for
5884 		 * path 0 and 1,3,5,7 for path 1
5885 		 */
5886 for (abs_func_id = BP_PATH(bp);
5887 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
5888 if (abs_func_id == BP_ABS_FUNC(bp)) {
5889 REG_WR(bp,
5890 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
5891 1);
5892 continue;
5893 }
5894
5895 bnx2x_pretend_func(bp, abs_func_id);
5896 /* clear pf enable */
5897 bnx2x_pf_disable(bp);
5898 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5899 }
5900 }
5901
5902 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
5903 if (CHIP_IS_E1(bp)) {
5904 /* enable HW interrupt from PXP on USDM overflow
5905 bit 16 on INT_MASK_0 */
5906 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5907 }
5908
5909 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
5910 bnx2x_init_pxp(bp);
5911
5912#ifdef __BIG_ENDIAN
5913 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5914 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5915 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5916 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5917 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5918 /* make sure this value is 0 */
5919 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5920
5921/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5922 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5923 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5924 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5925 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5926#endif
5927
5928 bnx2x_ilt_init_page_size(bp, INITOP_SET);
5929
5930 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5931 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5932
5933 	/* let the HW do its magic ... */
5934 msleep(100);
5935 /* finish PXP init */
5936 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5937 if (val != 1) {
5938 BNX2X_ERR("PXP2 CFG failed\n");
5939 return -EBUSY;
5940 }
5941 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5942 if (val != 1) {
5943 BNX2X_ERR("PXP2 RD_INIT failed\n");
5944 return -EBUSY;
5945 }
5946
5947 	/* Timers bug workaround (E2 only): we need to set the entire ILT to
5948 * have entries with value "0" and valid bit on.
5949 * This needs to be done by the first PF that is loaded in a path
5950 * (i.e. common phase)
5951 */
5952 if (!CHIP_IS_E1x(bp)) {
5953/* In E2 there is a bug in the timers block that can cause function 6 / 7
5954 * (i.e. vnic3) to start even if it is marked as "scan-off".
5955 * This occurs when a different function (func2,3) is being marked
5956 * as "scan-off". Real-life scenario for example: if a driver is being
5957 * load-unloaded while func6,7 are down. This will cause the timer to access
5958 * the ilt, translate to a logical address and send a request to read/write.
5959 * Since the ilt for the function that is down is not valid, this will cause
5960 * a translation error which is unrecoverable.
5961 * The Workaround is intended to make sure that when this happens nothing fatal
5962 * will occur. The workaround:
5963 * 1. First PF driver which loads on a path will:
5964 * a. After taking the chip out of reset, by using pretend,
5965 * it will write "0" to the following registers of
5966 * the other vnics.
5967 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
5968 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
5969 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
5970 * And for itself it will write '1' to
5971 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
5972 * dmae-operations (writing to pram for example.)
5973 * note: can be done for only function 6,7 but cleaner this
5974 * way.
5975 * b. Write zero+valid to the entire ILT.
5976 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
5977 * VNIC3 (of that port). The range allocated will be the
5978 * entire ILT. This is needed to prevent ILT range error.
5979 * 2. Any PF driver load flow:
5980 * a. ILT update with the physical addresses of the allocated
5981 * logical pages.
5982 * b. Wait 20msec. - note that this timeout is needed to make
5983 * sure there are no requests in one of the PXP internal
5984 * queues with "old" ILT addresses.
5985 * c. PF enable in the PGLC.
5986 * d. Clear the was_error of the PF in the PGLC. (could have
5987 *    occurred while driver was down)
5988 * e. PF enable in the CFC (WEAK + STRONG)
5989 * f. Timers scan enable
5990 * 3. PF driver unload flow:
5991 * a. Clear the Timers scan_en.
5992 * b. Polling for scan_on=0 for that PF.
5993 * c. Clear the PF enable bit in the PXP.
5994 * d. Clear the PF enable in the CFC (WEAK + STRONG)
5995 * e. Write zero+valid to all ILT entries (The valid bit must
5996 * stay set)
5997 * f. If this is VNIC 3 of a port then also init
5998 * first_timers_ilt_entry to zero and last_timers_ilt_entry
5999 *    to the last entry in the ILT.
6000 *
6001 * Notes:
6002 * Currently the PF error in the PGLC is non recoverable.
6003 * In the future there will be a recovery routine for this error.
6004 * Currently attention is masked.
6005 * Having an MCP lock on the load/unload process does not guarantee that
6006 * there is no Timer disable during Func6/7 enable. This is because the
6007 * Timers scan is currently being cleared by the MCP on FLR.
6008 * Step 2.d can be done only for PF6/7 and the driver can also check if
6009 * there is an error before clearing it. But the flow above is simpler and
6010 * more general.
6011 * All ILT entries are written by zero+valid and not just PF6/7
6012 * ILT entries since in the future the ILT entries allocation for
6013 * PF-s might be dynamic.
6014 */
6015 struct ilt_client_info ilt_cli;
6016 struct bnx2x_ilt ilt;
6017 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6018 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
6019
6020 /* initialize dummy TM client */
6021 ilt_cli.start = 0;
6022 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6023 ilt_cli.client_num = ILT_CLIENT_TM;
6024
6025 /* Step 1: set zeroes to all ilt page entries with valid bit on
6026 * Step 2: set the timers first/last ilt entry to point
6027 * to the entire range to prevent ILT range error for 3rd/4th
6028 	 * vnic (this code assumes existence of the vnic)
6029 *
6030 * both steps performed by call to bnx2x_ilt_client_init_op()
6031 * with dummy TM client
6032 *
6033 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
6034 	 * and its counterpart are split registers
6035 */
6036 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
6037 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
6038 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6039
6040 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
6041 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
6042 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
6043 }
6044
6045
6046 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6047 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6048
6049 if (!CHIP_IS_E1x(bp)) {
6050 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
6051 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
6052 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
6053
6054 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
6055
6056 		/* let the HW do its magic ... */
6057 do {
6058 msleep(200);
6059 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
6060 } while (factor-- && (val != 1));
6061
6062 if (val != 1) {
6063 BNX2X_ERR("ATC_INIT failed\n");
6064 return -EBUSY;
6065 }
6066 }
6067
6068 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
6069
6070 /* clean the DMAE memory */
6071 bp->dmae_ready = 1;
6072 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
6073
6074 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
6075
6076 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
6077
6078 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
6079
6080 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
6081
6082 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6083 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6084 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6085 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6086
6087 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
6088
6089
6090 /* QM queues pointers table */
6091 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
6092
6093 /* soft reset pulse */
6094 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6095 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6096
6097#ifdef BCM_CNIC
6098 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
6099#endif
6100
6101 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
6102 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
6103 if (!CHIP_REV_IS_SLOW(bp))
6104 /* enable hw interrupt from doorbell Q */
6105 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6106
6107 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6108
6109 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6110 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6111
6112 if (!CHIP_IS_E1(bp))
6113 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
6114
6115 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
6116 /* Bit-map indicating which L2 hdrs may appear
6117 * after the basic Ethernet header
6118 */
6119 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
6120 bp->path_has_ovlan ? 7 : 6);
6121
6122 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
6123 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
6124 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
6125 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
6126
6127 if (!CHIP_IS_E1x(bp)) {
6128 /* reset VFC memories */
6129 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6130 VFC_MEMORIES_RST_REG_CAM_RST |
6131 VFC_MEMORIES_RST_REG_RAM_RST);
6132 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6133 VFC_MEMORIES_RST_REG_CAM_RST |
6134 VFC_MEMORIES_RST_REG_RAM_RST);
6135
6136 msleep(20);
6137 }
6138
6139 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
6140 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
6141 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
6142 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
6143
6144 /* sync semi rtc */
6145 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6146 0x80000000);
6147 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6148 0x80000000);
6149
6150 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
6151 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
6152 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
6153
6154 if (!CHIP_IS_E1x(bp))
6155 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
6156 bp->path_has_ovlan ? 7 : 6);
6157
6158 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6159
6160 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
6161
6162#ifdef BCM_CNIC
6163 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6164 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6165 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6166 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6167 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6168 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6169 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6170 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6171 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6172 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6173#endif
6174 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6175
6176 if (sizeof(union cdu_context) != 1024)
6177 /* we currently assume that a context is 1024 bytes */
6178 dev_alert(&bp->pdev->dev, "please adjust the size "
6179 "of cdu_context(%ld)\n",
6180 (long)sizeof(union cdu_context));
6181
6182 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
6183 val = (4 << 24) + (0 << 12) + 1024;
6184 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6185
6186 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
6187 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6188 /* enable context validation interrupt from CFC */
6189 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6190
6191 /* set the thresholds to prevent CFC/CDU race */
6192 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6193
6194 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
6195
6196 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
6197 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
6198
6199 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
6200 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
6201
6202 /* Reset PCIE errors for debug */
6203 REG_WR(bp, 0x2814, 0xffffffff);
6204 REG_WR(bp, 0x3820, 0xffffffff);
6205
6206 if (!CHIP_IS_E1x(bp)) {
6207 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
6208 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
6209 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
6210 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
6211 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
6212 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
6213 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
6214 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
6215 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
6216 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
6217 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
6218 }
6219
6220 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
6221 if (!CHIP_IS_E1(bp)) {
6222 		/* in E3 this is done in the per-port section */
6223 if (!CHIP_IS_E3(bp))
6224 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
6225 }
6226 if (CHIP_IS_E1H(bp))
6227 /* not applicable for E2 (and above ...) */
6228 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
6229
6230 if (CHIP_REV_IS_SLOW(bp))
6231 msleep(200);
6232
6233 /* finish CFC init */
6234 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6235 if (val != 1) {
6236 BNX2X_ERR("CFC LL_INIT failed\n");
6237 return -EBUSY;
6238 }
6239 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6240 if (val != 1) {
6241 BNX2X_ERR("CFC AC_INIT failed\n");
6242 return -EBUSY;
6243 }
6244 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6245 if (val != 1) {
6246 BNX2X_ERR("CFC CAM_INIT failed\n");
6247 return -EBUSY;
6248 }
6249 REG_WR(bp, CFC_REG_DEBUG0, 0);
6250
6251 if (CHIP_IS_E1(bp)) {
6252 /* read NIG statistic
6253 to see if this is our first up since powerup */
6254 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6255 val = *bnx2x_sp(bp, wb_data[0]);
6256
6257 /* do internal memory self test */
6258 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6259 BNX2X_ERR("internal mem self test failed\n");
6260 return -EBUSY;
6261 }
6262 }
6263
6264 bnx2x_setup_fan_failure_detection(bp);
6265
6266 /* clear PXP2 attentions */
6267 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6268
6269 bnx2x_enable_blocks_attention(bp);
6270 bnx2x_enable_blocks_parity(bp);
6271
6272 if (!BP_NOMCP(bp)) {
6273 if (CHIP_IS_E1x(bp))
6274 bnx2x__common_init_phy(bp);
6275 } else
6276		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
6277
6278 return 0;
6279}
6280
6281/**
6282 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
6283 *
6284 * @bp: driver handle
6285 */
6286static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
6287{
6288 int rc = bnx2x_init_hw_common(bp);
6289
6290 if (rc)
6291 return rc;
6292
6293 /* In E2 2-PORT mode, same ext phy is used for the two paths */
6294 if (!BP_NOMCP(bp))
6295 bnx2x__common_init_phy(bp);
6296
6297 return 0;
6298}
6299
6300static int bnx2x_init_hw_port(struct bnx2x *bp)
6301{
6302 int port = BP_PORT(bp);
6303 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
6304 u32 low, high;
6305 u32 val;
6306
6307 bnx2x__link_reset(bp);
6308
6309 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
6310
6311 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6312
6313 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
6314 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
6315 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
6316
6317	/* Timers bug workaround: the common phase disables the pf_master
6318	 * bit in PGLUE, so we need to enable it here before any DMAE
6319	 * accesses are attempted. Therefore the enable-master was added
6320	 * manually to the port phase (it also happens in the function phase)
6321	 */
6322 if (!CHIP_IS_E1x(bp))
6323 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
6324
6325 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
6326 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
6327 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
6328 bnx2x_init_block(bp, BLOCK_QM, init_phase);
6329
6330 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
6331 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
6332 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
6333 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
6334
6335 /* QM cid (connection) count */
6336 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
6337
6338#ifdef BCM_CNIC
6339 bnx2x_init_block(bp, BLOCK_TM, init_phase);
6340 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6341 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6342#endif
6343
6344 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
6345
6346 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
6347 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
6348
6349 if (IS_MF(bp))
6350 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6351 else if (bp->dev->mtu > 4096) {
6352 if (bp->flags & ONE_PORT_FLAG)
6353 low = 160;
6354 else {
6355 val = bp->dev->mtu;
6356 /* (24*1024 + val*4)/256 */
6357 low = 96 + (val/64) +
6358 ((val % 64) ? 1 : 0);
6359 }
6360 } else
6361 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6362 high = low + 56; /* 14*1024/256 */
6363 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6364 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6365 }
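	/* A worked example of the pause-threshold arithmetic above
	 * (illustrative values only): with MTU 9000 on a two-port device,
	 * low = 96 + 9000/64 + 1 = 237 BRB blocks of 256 bytes, i.e.
	 * (24*1024 + 9000*4)/256 rounded up, and high = 237 + 56 = 293,
	 * 14KB above the low threshold.
	 */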
6366
6367 if (CHIP_MODE_IS_4_PORT(bp))
6368 REG_WR(bp, (BP_PORT(bp) ?
6369 BRB1_REG_MAC_GUARANTIED_1 :
6370 BRB1_REG_MAC_GUARANTIED_0), 40);
6371
6372
6373 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
6374 if (CHIP_IS_E3B0(bp))
6375		/* Ovlan exists only if we are in multi-function +
6376		 * switch-dependent mode; in switch-independent mode
6377		 * there are no ovlan headers
6378		 */
6379 REG_WR(bp, BP_PORT(bp) ?
6380 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
6381 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
6382 (bp->path_has_ovlan ? 7 : 6));
6383
6384 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
6385 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
6386 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
6387 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
6388
6389 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
6390 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
6391 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
6392 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
6393
6394 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
6395 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
6396
6397 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
6398
6399 if (CHIP_IS_E1x(bp)) {
6400 /* configure PBF to work without PAUSE mtu 9000 */
6401 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6402
6403 /* update threshold */
6404 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6405 /* update init credit */
6406 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6407
6408 /* probe changes */
6409 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6410 udelay(50);
6411 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6412 }
6413
6414#ifdef BCM_CNIC
6415 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
6416#endif
6417 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
6418 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
6419
6420 if (CHIP_IS_E1(bp)) {
6421 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6422 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6423 }
6424 bnx2x_init_block(bp, BLOCK_HC, init_phase);
6425
6426 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
6427
6428 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
6429 /* init aeu_mask_attn_func_0/1:
6430 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6431 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6432 * bits 4-7 are used for "per vn group attention" */
6433 val = IS_MF(bp) ? 0xF7 : 0x7;
6434 /* Enable DCBX attention for all but E1 */
6435 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
6436 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
6437
6438 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
6439
6440 if (!CHIP_IS_E1x(bp)) {
6441 /* Bit-map indicating which L2 hdrs may appear after the
6442 * basic Ethernet header
6443 */
6444 REG_WR(bp, BP_PORT(bp) ?
6445 NIG_REG_P1_HDRS_AFTER_BASIC :
6446 NIG_REG_P0_HDRS_AFTER_BASIC,
6447 IS_MF_SD(bp) ? 7 : 6);
6448
6449 if (CHIP_IS_E3(bp))
6450 REG_WR(bp, BP_PORT(bp) ?
6451 NIG_REG_LLH1_MF_MODE :
6452 NIG_REG_LLH_MF_MODE, IS_MF(bp));
6453 }
6454 if (!CHIP_IS_E3(bp))
6455 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6456
6457 if (!CHIP_IS_E1(bp)) {
6458 /* 0x2 disable mf_ov, 0x1 enable */
6459 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6460 (IS_MF_SD(bp) ? 0x1 : 0x2));
6461
6462 if (!CHIP_IS_E1x(bp)) {
6463 val = 0;
6464 switch (bp->mf_mode) {
6465 case MULTI_FUNCTION_SD:
6466 val = 1;
6467 break;
6468 case MULTI_FUNCTION_SI:
6469 val = 2;
6470 break;
6471 }
6472
6473 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
6474 NIG_REG_LLH0_CLS_TYPE), val);
6475 }
6476 {
6477 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6478 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6479 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6480 }
6481 }
6482
6483
6484 /* If SPIO5 is set to generate interrupts, enable it for this port */
6485 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6486 if (val & (1 << MISC_REGISTERS_SPIO_5)) {
6487 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6488 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6489 val = REG_RD(bp, reg_addr);
6490 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6491 REG_WR(bp, reg_addr, val);
6492 }
6493
6494 return 0;
6495}
6496
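/* A note on the helper below: the ILT line's DMA address is encoded into
 * two 32-bit words (ONCHIP_ADDR1/ONCHIP_ADDR2) and written with a single
 * wide-bus write; only the table base register differs between E1 and
 * later chips.
 */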
6497static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6498{
6499 int reg;
6500
6501 if (CHIP_IS_E1(bp))
6502 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6503 else
6504 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6505
6506 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6507}
6508
6509static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
6510{
6511 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
6512}
6513
6514static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
6515{
6516 u32 i, base = FUNC_ILT_BASE(func);
6517 for (i = base; i < base + ILT_PER_FUNC; i++)
6518 bnx2x_ilt_wr(bp, i, 0);
6519}
6520
6521static int bnx2x_init_hw_func(struct bnx2x *bp)
6522{
6523 int port = BP_PORT(bp);
6524 int func = BP_FUNC(bp);
6525 int init_phase = PHASE_PF0 + func;
6526 struct bnx2x_ilt *ilt = BP_ILT(bp);
6527 u16 cdu_ilt_start;
6528 u32 addr, val;
6529 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
6530 int i, main_mem_width;
6531
6532 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
6533
6534	/* FLR cleanup - clean up leftovers of a previous Function Level Reset */
6535 if (!CHIP_IS_E1x(bp))
6536 bnx2x_pf_flr_clnup(bp);
6537
6538 /* set MSI reconfigure capability */
6539 if (bp->common.int_block == INT_BLOCK_HC) {
6540 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6541 val = REG_RD(bp, addr);
6542 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6543 REG_WR(bp, addr, val);
6544 }
6545
6546 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
6547 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
6548
6549 ilt = BP_ILT(bp);
6550 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
6551
6552 for (i = 0; i < L2_ILT_LINES(bp); i++) {
6553 ilt->lines[cdu_ilt_start + i].page =
6554 bp->context.vcxt + (ILT_PAGE_CIDS * i);
6555 ilt->lines[cdu_ilt_start + i].page_mapping =
6556 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
6557		/* cdu ilt pages are allocated manually so there's no need
6558		 * to set the size */
6559 }
6560 bnx2x_ilt_init_op(bp, INITOP_SET);
6561
6562#ifdef BCM_CNIC
6563 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
6564
6565 /* T1 hash bits value determines the T1 number of entries */
6566 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
6567#endif
6568
6569#ifndef BCM_CNIC
6570 /* set NIC mode */
6571 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6572#endif /* BCM_CNIC */
6573
6574 if (!CHIP_IS_E1x(bp)) {
6575 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
6576
6577 /* Turn on a single ISR mode in IGU if driver is going to use
6578 * INT#x or MSI
6579 */
6580 if (!(bp->flags & USING_MSIX_FLAG))
6581 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
6582		/*
6583		 * Timers bug workaround: function init part.
6584		 * Need to wait 20msec after initializing the ILT to make
6585		 * sure there are no requests in one of the PXP internal
6586		 * queues with "old" ILT addresses
6587		 */
6588 msleep(20);
6589		/*
6590		 * Master enable - needed because WB DMAE writes are
6591		 * performed before this register is re-initialized as
6592		 * part of the regular function init
6593		 */
6594 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
6595 /* Enable the function in IGU */
6596 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
6597 }
6598
6599 bp->dmae_ready = 1;
6600
6601 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
6602
6603 if (!CHIP_IS_E1x(bp))
6604 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
6605
6606 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
6607 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
6608 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
6609 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
6610 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
6611 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
6612 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
6613 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
6614 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
6615 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
6616 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
6617 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
6618 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
6619
6620 if (!CHIP_IS_E1x(bp))
6621 REG_WR(bp, QM_REG_PF_EN, 1);
6622
6623 if (!CHIP_IS_E1x(bp)) {
6624 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
6625 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
6626 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
6627 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
6628 }
6629 bnx2x_init_block(bp, BLOCK_QM, init_phase);
6630
6631 bnx2x_init_block(bp, BLOCK_TM, init_phase);
6632 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
6633 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
6634 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
6635 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
6636 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
6637 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
6638 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
6639 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
6640 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
6641 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
6642 if (!CHIP_IS_E1x(bp))
6643 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
6644
6645 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
6646
6647 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
6648
6649 if (!CHIP_IS_E1x(bp))
6650 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
6651
6652 if (IS_MF(bp)) {
6653 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6654 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
6655 }
6656
6657 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
6658
6659 /* HC init per function */
6660 if (bp->common.int_block == INT_BLOCK_HC) {
6661 if (CHIP_IS_E1H(bp)) {
6662 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6663
6664 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6665 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6666 }
6667 bnx2x_init_block(bp, BLOCK_HC, init_phase);
6668
6669 } else {
6670 int num_segs, sb_idx, prod_offset;
6671
6672 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6673
6674 if (!CHIP_IS_E1x(bp)) {
6675 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6676 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6677 }
6678
6679 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
6680
6681 if (!CHIP_IS_E1x(bp)) {
6682 int dsb_idx = 0;
6683			/*
6684 * Producer memory:
6685 * E2 mode: address 0-135 match to the mapping memory;
6686 * 136 - PF0 default prod; 137 - PF1 default prod;
6687 * 138 - PF2 default prod; 139 - PF3 default prod;
6688 * 140 - PF0 attn prod; 141 - PF1 attn prod;
6689 * 142 - PF2 attn prod; 143 - PF3 attn prod;
6690 * 144-147 reserved.
6691 *
6692 * E1.5 mode - In backward compatible mode;
6693 * for non default SB; each even line in the memory
6694			 * holds the U producer and each odd line holds
6695 * the C producer. The first 128 producers are for
6696 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
6697 * producers are for the DSB for each PF.
6698 * Each PF has five segments: (the order inside each
6699 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
6700 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
6701 * 144-147 attn prods;
6702 */
6703 /* non-default-status-blocks */
6704 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
6705 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
6706 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
6707 prod_offset = (bp->igu_base_sb + sb_idx) *
6708 num_segs;
6709
6710 for (i = 0; i < num_segs; i++) {
6711 addr = IGU_REG_PROD_CONS_MEMORY +
6712 (prod_offset + i) * 4;
6713 REG_WR(bp, addr, 0);
6714 }
6715 /* send consumer update with value 0 */
6716 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
6717 USTORM_ID, 0, IGU_INT_NOP, 1);
6718 bnx2x_igu_clear_sb(bp,
6719 bp->igu_base_sb + sb_idx);
6720 }
6721
6722 /* default-status-blocks */
6723 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
6724 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
6725
6726 if (CHIP_MODE_IS_4_PORT(bp))
6727 dsb_idx = BP_FUNC(bp);
6728 else
6729 dsb_idx = BP_VN(bp);
6730
6731 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
6732 IGU_BC_BASE_DSB_PROD + dsb_idx :
6733 IGU_NORM_BASE_DSB_PROD + dsb_idx);
6734
6735				/*
6736				 * igu prods come in chunks of E1HVN_MAX (4),
6737				 * no matter what the current chip mode is
6738				 */
6739 for (i = 0; i < (num_segs * E1HVN_MAX);
6740 i += E1HVN_MAX) {
6741 addr = IGU_REG_PROD_CONS_MEMORY +
6742 (prod_offset + i)*4;
6743 REG_WR(bp, addr, 0);
6744 }
6745 /* send consumer update with 0 */
6746 if (CHIP_INT_MODE_IS_BC(bp)) {
6747 bnx2x_ack_sb(bp, bp->igu_dsb_id,
6748 USTORM_ID, 0, IGU_INT_NOP, 1);
6749 bnx2x_ack_sb(bp, bp->igu_dsb_id,
6750 CSTORM_ID, 0, IGU_INT_NOP, 1);
6751 bnx2x_ack_sb(bp, bp->igu_dsb_id,
6752 XSTORM_ID, 0, IGU_INT_NOP, 1);
6753 bnx2x_ack_sb(bp, bp->igu_dsb_id,
6754 TSTORM_ID, 0, IGU_INT_NOP, 1);
6755 bnx2x_ack_sb(bp, bp->igu_dsb_id,
6756 ATTENTION_ID, 0, IGU_INT_NOP, 1);
6757 } else {
6758 bnx2x_ack_sb(bp, bp->igu_dsb_id,
6759 USTORM_ID, 0, IGU_INT_NOP, 1);
6760 bnx2x_ack_sb(bp, bp->igu_dsb_id,
6761 ATTENTION_ID, 0, IGU_INT_NOP, 1);
6762 }
6763 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
6764
6765 /* !!! these should become driver const once
6766 rf-tool supports split-68 const */
6767 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
6768 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
6769 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
6770 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
6771 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
6772 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
6773 }
6774 }
6775
6776 /* Reset PCIE errors for debug */
6777 REG_WR(bp, 0x2114, 0xffffffff);
6778 REG_WR(bp, 0x2120, 0xffffffff);
6779
6780 if (CHIP_IS_E1x(bp)) {
6781 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
6782 main_mem_base = HC_REG_MAIN_MEMORY +
6783 BP_PORT(bp) * (main_mem_size * 4);
6784 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
6785 main_mem_width = 8;
6786
6787 val = REG_RD(bp, main_mem_prty_clr);
6788 if (val)
6789			DP(BNX2X_MSG_MCP, "Parity errors in HC "
6790			   "block during "
6791			   "function init (0x%x)!\n", val);
6792
6793 /* Clear "false" parity errors in MSI-X table */
6794 for (i = main_mem_base;
6795 i < main_mem_base + main_mem_size * 4;
6796 i += main_mem_width) {
6797 bnx2x_read_dmae(bp, i, main_mem_width / 4);
6798 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
6799 i, main_mem_width / 4);
6800 }
6801 /* Clear HC parity attention */
6802 REG_RD(bp, main_mem_prty_clr);
6803 }
6804
6805#ifdef BNX2X_STOP_ON_ERROR
6806 /* Enable STORMs SP logging */
6807 REG_WR8(bp, BAR_USTRORM_INTMEM +
6808 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
6809 REG_WR8(bp, BAR_TSTRORM_INTMEM +
6810 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
6811 REG_WR8(bp, BAR_CSTRORM_INTMEM +
6812 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
6813 REG_WR8(bp, BAR_XSTRORM_INTMEM +
6814 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
6815#endif
6816
6817 bnx2x_phy_probe(&bp->link_params);
6818
6819 return 0;
6820}
6821
6822
6823void bnx2x_free_mem(struct bnx2x *bp)
6824{
6825 /* fastpath */
6826 bnx2x_free_fp_mem(bp);
6827 /* end of fastpath */
6828
6829 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6830 sizeof(struct host_sp_status_block));
6831
6832 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
6833 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
6834
6835 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6836 sizeof(struct bnx2x_slowpath));
6837
6838 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
6839 bp->context.size);
6840
6841 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
6842
6843 BNX2X_FREE(bp->ilt->lines);
6844
6845#ifdef BCM_CNIC
6846 if (!CHIP_IS_E1x(bp))
6847 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
6848 sizeof(struct host_hc_status_block_e2));
6849 else
6850 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
6851 sizeof(struct host_hc_status_block_e1x));
6852
6853 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
6854#endif
6855
6856 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6857
6858 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
6859 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6860}
6861
6862static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
6863{
6864 int num_groups;
6865
6866 /* number of eth_queues */
6867 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
6868
6869 /* Total number of FW statistics requests =
6870 * 1 for port stats + 1 for PF stats + num_eth_queues */
6871 bp->fw_stats_num = 2 + num_queue_stats;
6872
6873
6874 /* Request is built from stats_query_header and an array of
6875 * stats_query_cmd_group each of which contains
6876	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
6877 * configured in the stats_query_header.
6878 */
6879 num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
6880 (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
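	/* Illustrative example (queue count assumed): with 8 eth queues and
	 * assuming STATS_QUERY_CMD_COUNT == 16, the 2 + 8 = 10 requests fit
	 * in a single group: 10/16 + 1 = 1. The expression above is an
	 * open-coded DIV_ROUND_UP(2 + num_queue_stats, STATS_QUERY_CMD_COUNT).
	 */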
6881
6882 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
6883 num_groups * sizeof(struct stats_query_cmd_group);
6884
6885	/* Data for statistics requests + stats_counter
6886 *
6887 * stats_counter holds per-STORM counters that are incremented
6888 * when STORM has finished with the current request.
6889 */
6890 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
6891 sizeof(struct per_pf_stats) +
6892 sizeof(struct per_queue_stats) * num_queue_stats +
6893 sizeof(struct stats_counter);
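	/* Resulting DMA buffer layout (the shortcuts below point into it):
	 *
	 *   fw_stats      -> | stats_query_header | cmd group(s) ...        |
	 *   fw_stats_data -> | per_port | per_pf | per_queue x N | counter |
	 */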
6894
6895 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
6896 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
6897
6898 /* Set shortcuts */
6899 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
6900 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
6901
6902 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
6903 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
6904
6905 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
6906 bp->fw_stats_req_sz;
6907 return 0;
6908
6909alloc_mem_err:
6910 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
6911 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
6912 return -ENOMEM;
6913}
6914
6915
6916int bnx2x_alloc_mem(struct bnx2x *bp)
6917{
6918#ifdef BCM_CNIC
6919 if (!CHIP_IS_E1x(bp))
6920 /* size = the status block + ramrod buffers */
6921 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
6922 sizeof(struct host_hc_status_block_e2));
6923 else
6924 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
6925 sizeof(struct host_hc_status_block_e1x));
6926
6927 /* allocate searcher T2 table */
6928 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
6929#endif
6930
6931
6932 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6933 sizeof(struct host_sp_status_block));
6934
6935 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6936 sizeof(struct bnx2x_slowpath));
6937
6938	/* Allocate memory for FW statistics */
6939 if (bnx2x_alloc_fw_stats_mem(bp))
6940 goto alloc_mem_err;
6941
6942 bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
6943
6944 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6945 bp->context.size);
6946
6947 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
6948
6949 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6950 goto alloc_mem_err;
6951
6952 /* Slow path ring */
6953 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6954
6955 /* EQ */
6956 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6957 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6958
6959
6960 /* fastpath */
6961	/* must be done at the end, since it self-adjusts to the amount
6962	 * of memory available for the RSS queues
6963	 */
6964 if (bnx2x_alloc_fp_mem(bp))
6965 goto alloc_mem_err;
6966 return 0;
6967
6968alloc_mem_err:
6969 bnx2x_free_mem(bp);
6970 return -ENOMEM;
6971}
6972
6973/*
6974 * Init service functions
6975 */
6976
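/**
 * bnx2x_set_mac_one - add or delete a single MAC address.
 *
 * @bp:		driver handle
 * @mac:	MAC address
 * @obj:	vlan_mac object to configure
 * @set:	true to add the MAC, false to delete it
 * @mac_type:	MAC type flag set in vlan_mac_flags (e.g. BNX2X_ETH_MAC)
 * @ramrod_flags: RAMROD_XXX flags for the transition
 *
 * When RAMROD_CONT is set in @ramrod_flags the pending command is
 * continued and no new user request section is built.
 */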
6977int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
6978 struct bnx2x_vlan_mac_obj *obj, bool set,
6979 int mac_type, unsigned long *ramrod_flags)
6980{
6981 int rc;
6982 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
6983
6984 memset(&ramrod_param, 0, sizeof(ramrod_param));
6985
6986 /* Fill general parameters */
6987 ramrod_param.vlan_mac_obj = obj;
6988 ramrod_param.ramrod_flags = *ramrod_flags;
6989
6990 /* Fill a user request section if needed */
6991 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
6992 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
6993
6994 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
6995
6996 /* Set the command: ADD or DEL */
6997 if (set)
6998 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
6999 else
7000 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
7001 }
7002
7003 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
7004 if (rc < 0)
7005 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
7006 return rc;
7007}
7008
7009int bnx2x_del_all_macs(struct bnx2x *bp,
7010 struct bnx2x_vlan_mac_obj *mac_obj,
7011 int mac_type, bool wait_for_comp)
7012{
7013 int rc;
7014 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
7015
7016	/* Wait for completion of the requested command */
7017 if (wait_for_comp)
7018 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
7019
7020 /* Set the mac type of addresses we want to clear */
7021 __set_bit(mac_type, &vlan_mac_flags);
7022
7023 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
7024 if (rc < 0)
7025 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
7026
7027 return rc;
7028}
7029
7030int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
7031{
7032 unsigned long ramrod_flags = 0;
7033
7034 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
7035
7036 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
7037 /* Eth MAC is set on RSS leading client (fp[0]) */
7038 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
7039 BNX2X_ETH_MAC, &ramrod_flags);
7040}
7041
7042int bnx2x_setup_leading(struct bnx2x *bp)
7043{
7044 return bnx2x_setup_queue(bp, &bp->fp[0], 1);
7045}
7046
7047/**
7048 * bnx2x_set_int_mode - configure interrupt mode
7049 *
7050 * @bp: driver handle
7051 *
7052 * In case of MSI-X it will also try to enable MSI-X.
7053 */
7054static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
7055{
7056 switch (int_mode) {
7057 case INT_MODE_MSI:
7058 bnx2x_enable_msi(bp);
7059 /* falling through... */
7060 case INT_MODE_INTx:
7061 bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
7062 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7063 break;
7064 default:
7065 /* Set number of queues according to bp->multi_mode value */
7066 bnx2x_set_num_queues(bp);
7067
7068 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7069 bp->num_queues);
7070
7071 /* if we can't use MSI-X we only need one fp,
7072 * so try to enable MSI-X with the requested number of fp's
7073	 * and fall back to MSI or legacy INTx with one fp
7074 */
7075 if (bnx2x_enable_msix(bp)) {
7076 /* failed to enable MSI-X */
7077 if (bp->multi_mode)
7078 DP(NETIF_MSG_IFUP,
7079 "Multi requested but failed to "
7080 "enable MSI-X (%d), "
7081 "set number of queues to %d\n",
7082 bp->num_queues,
7083 1 + NON_ETH_CONTEXT_USE);
7084 bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
7085
7086 /* Try to enable MSI */
7087 if (!(bp->flags & DISABLE_MSI_FLAG))
7088 bnx2x_enable_msi(bp);
7089 }
7090 break;
7091 }
7092}
7093
7094/* must be called prior to any HW initializations */
7095static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
7096{
7097 return L2_ILT_LINES(bp);
7098}
7099
7100void bnx2x_ilt_set_info(struct bnx2x *bp)
7101{
7102 struct ilt_client_info *ilt_client;
7103 struct bnx2x_ilt *ilt = BP_ILT(bp);
7104 u16 line = 0;
7105
7106 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
7107 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
7108
7109 /* CDU */
7110 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
7111 ilt_client->client_num = ILT_CLIENT_CDU;
7112 ilt_client->page_size = CDU_ILT_PAGE_SZ;
7113 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
7114 ilt_client->start = line;
7115 line += bnx2x_cid_ilt_lines(bp);
7116#ifdef BCM_CNIC
7117 line += CNIC_ILT_LINES;
7118#endif
7119 ilt_client->end = line - 1;
7120
7121 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
7122 "flags 0x%x, hw psz %d\n",
7123 ilt_client->start,
7124 ilt_client->end,
7125 ilt_client->page_size,
7126 ilt_client->flags,
7127 ilog2(ilt_client->page_size >> 12));
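	/* The 'hw psz' printed above is the page size as a power of two in
	 * 4KB units, e.g. a 32KB ILT page would be reported as ilog2(8) == 3.
	 */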
7128
7129 /* QM */
7130 if (QM_INIT(bp->qm_cid_count)) {
7131 ilt_client = &ilt->clients[ILT_CLIENT_QM];
7132 ilt_client->client_num = ILT_CLIENT_QM;
7133 ilt_client->page_size = QM_ILT_PAGE_SZ;
7134 ilt_client->flags = 0;
7135 ilt_client->start = line;
7136
7137 /* 4 bytes for each cid */
7138 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
7139 QM_ILT_PAGE_SZ);
7140
7141 ilt_client->end = line - 1;
7142
7143 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
7144 "flags 0x%x, hw psz %d\n",
7145 ilt_client->start,
7146 ilt_client->end,
7147 ilt_client->page_size,
7148 ilt_client->flags,
7149 ilog2(ilt_client->page_size >> 12));
7150
7151 }
7152 /* SRC */
7153 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
7154#ifdef BCM_CNIC
7155 ilt_client->client_num = ILT_CLIENT_SRC;
7156 ilt_client->page_size = SRC_ILT_PAGE_SZ;
7157 ilt_client->flags = 0;
7158 ilt_client->start = line;
7159 line += SRC_ILT_LINES;
7160 ilt_client->end = line - 1;
7161
7162 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
7163 "flags 0x%x, hw psz %d\n",
7164 ilt_client->start,
7165 ilt_client->end,
7166 ilt_client->page_size,
7167 ilt_client->flags,
7168 ilog2(ilt_client->page_size >> 12));
7169
7170#else
7171 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
7172#endif
7173
7174 /* TM */
7175 ilt_client = &ilt->clients[ILT_CLIENT_TM];
7176#ifdef BCM_CNIC
7177 ilt_client->client_num = ILT_CLIENT_TM;
7178 ilt_client->page_size = TM_ILT_PAGE_SZ;
7179 ilt_client->flags = 0;
7180 ilt_client->start = line;
7181 line += TM_ILT_LINES;
7182 ilt_client->end = line - 1;
7183
7184 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
7185 "flags 0x%x, hw psz %d\n",
7186 ilt_client->start,
7187 ilt_client->end,
7188 ilt_client->page_size,
7189 ilt_client->flags,
7190 ilog2(ilt_client->page_size >> 12));
7191
7192#else
7193 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
7194#endif
7195 BUG_ON(line > ILT_MAX_LINES);
7196}
7197
7198/**
7199 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
7200 *
7201 * @bp: driver handle
7202 * @fp: pointer to fastpath
7203 * @init_params: pointer to parameters structure
7204 *
7205 * parameters configured:
7206 * - HC configuration
7207 * - Queue's CDU context
7208 */
7209static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
7210 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
7211{
7212
7213 u8 cos;
7214 /* FCoE Queue uses Default SB, thus has no HC capabilities */
7215 if (!IS_FCOE_FP(fp)) {
7216 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
7217 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
7218
7219		/* If HC is supported, enable host coalescing in the transition
7220 * to INIT state.
7221 */
7222 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
7223 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
7224
7225 /* HC rate */
7226 init_params->rx.hc_rate = bp->rx_ticks ?
7227 (1000000 / bp->rx_ticks) : 0;
7228 init_params->tx.hc_rate = bp->tx_ticks ?
7229 (1000000 / bp->tx_ticks) : 0;
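		/* hc_rate converts the coalescing interval (in usec) into an
		 * event rate per second, e.g. rx_ticks == 50 gives
		 * 1000000 / 50 == 20000; ticks of 0 yield a rate of 0.
		 */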
7230
7231 /* FW SB ID */
7232 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
7233 fp->fw_sb_id;
7234
7235 /*
7236		 * CQ index among the SB indices: the FCoE client uses the default
7237 * SB, therefore it's different.
7238 */
7239 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
7240 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
7241 }
7242
7243 /* set maximum number of COSs supported by this queue */
7244 init_params->max_cos = fp->max_cos;
7245
7246	DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d\n",
7247 fp->index, init_params->max_cos);
7248
7249 /* set the context pointers queue object */
7250 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
7251 init_params->cxts[cos] =
7252 &bp->context.vcxt[fp->txdata[cos].cid].eth;
7253}
7254
7255int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
7256 struct bnx2x_queue_state_params *q_params,
7257 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
7258 int tx_index, bool leading)
7259{
7260 memset(tx_only_params, 0, sizeof(*tx_only_params));
7261
7262 /* Set the command */
7263 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
7264
7265 /* Set tx-only QUEUE flags: don't zero statistics */
7266 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
7267
7268 /* choose the index of the cid to send the slow path on */
7269 tx_only_params->cid_index = tx_index;
7270
7271 /* Set general TX_ONLY_SETUP parameters */
7272 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
7273
7274 /* Set Tx TX_ONLY_SETUP parameters */
7275 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
7276
7277	DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection: "
7278			 "cos %d, primary cid %d, cid %d, "
7279			 "client id %d, sp-client id %d, flags %lx\n",
7280 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
7281 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
7282 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
7283
7284 /* send the ramrod */
7285 return bnx2x_queue_state_change(bp, q_params);
7286}
7287
7288
7289/**
7290 * bnx2x_setup_queue - setup queue
7291 *
7292 * @bp: driver handle
7293 * @fp: pointer to fastpath
7294 * @leading: is leading
7295 *
7296 * This function performs 2 steps in a Queue state machine
7297 * actually: 1) RESET->INIT 2) INIT->SETUP
7298 */
7299
7300int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
7301 bool leading)
7302{
7303 struct bnx2x_queue_state_params q_params = {0};
7304 struct bnx2x_queue_setup_params *setup_params =
7305 &q_params.params.setup;
7306 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
7307 &q_params.params.tx_only;
7308 int rc;
7309 u8 tx_index;
7310
7311	DP(BNX2X_MSG_SP, "setting up queue %d\n", fp->index);
7312
7313 /* reset IGU state skip FCoE L2 queue */
7314 if (!IS_FCOE_FP(fp))
7315 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
7316 IGU_INT_ENABLE, 0);
7317
7318 q_params.q_obj = &fp->q_obj;
7319 /* We want to wait for completion in this context */
7320 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
7321
7322 /* Prepare the INIT parameters */
7323 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
7324
7325 /* Set the command */
7326 q_params.cmd = BNX2X_Q_CMD_INIT;
7327
7328 /* Change the state to INIT */
7329 rc = bnx2x_queue_state_change(bp, &q_params);
7330 if (rc) {
7331 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
7332 return rc;
7333 }
7334
7335	DP(BNX2X_MSG_SP, "init complete\n");
7336
7337
7338 /* Now move the Queue to the SETUP state... */
7339 memset(setup_params, 0, sizeof(*setup_params));
7340
7341 /* Set QUEUE flags */
7342 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
7343
7344 /* Set general SETUP parameters */
7345 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
7346 FIRST_TX_COS_INDEX);
7347
7348 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
7349 &setup_params->rxq_params);
7350
7351 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
7352 FIRST_TX_COS_INDEX);
7353
7354 /* Set the command */
7355 q_params.cmd = BNX2X_Q_CMD_SETUP;
7356
7357 /* Change the state to SETUP */
7358 rc = bnx2x_queue_state_change(bp, &q_params);
7359 if (rc) {
7360 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
7361 return rc;
7362 }
7363
7364 /* loop through the relevant tx-only indices */
7365 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
7366 tx_index < fp->max_cos;
7367 tx_index++) {
7368
7369 /* prepare and send tx-only ramrod*/
7370 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
7371 tx_only_params, tx_index, leading);
7372 if (rc) {
7373 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
7374 fp->index, tx_index);
7375 return rc;
7376 }
7377 }
7378
7379 return rc;
7380}
7381
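/**
 * bnx2x_stop_queue - tear down an ETH queue.
 *
 * @bp:		driver handle
 * @index:	fastpath index
 *
 * Closes the tx-only connections first (TERMINATE followed by CFC_DEL
 * for each extra COS), then takes the primary connection through
 * HALT --> TERMINATE --> CFC_DEL.
 */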
7382static int bnx2x_stop_queue(struct bnx2x *bp, int index)
7383{
7384 struct bnx2x_fastpath *fp = &bp->fp[index];
7385 struct bnx2x_fp_txdata *txdata;
7386 struct bnx2x_queue_state_params q_params = {0};
7387 int rc, tx_index;
7388
7389	DP(BNX2X_MSG_SP, "stopping queue %d cid %d\n", index, fp->cid);
7390
7391 q_params.q_obj = &fp->q_obj;
7392 /* We want to wait for completion in this context */
7393 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
7394
7395
7396 /* close tx-only connections */
7397 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
7398 tx_index < fp->max_cos;
7399 tx_index++){
7400
7401		/* ascertain this is a normal queue */
7402 txdata = &fp->txdata[tx_index];
7403
7404		DP(BNX2X_MSG_SP, "stopping tx-only queue %d\n",
7405 txdata->txq_index);
7406
7407		/* send TERMINATE ramrod on the tx-only connection */
7408 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
7409 memset(&q_params.params.terminate, 0,
7410 sizeof(q_params.params.terminate));
7411 q_params.params.terminate.cid_index = tx_index;
7412
7413 rc = bnx2x_queue_state_change(bp, &q_params);
7414 if (rc)
7415 return rc;
7416
7417		/* send CFC_DEL ramrod on the tx-only connection */
7418 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
7419 memset(&q_params.params.cfc_del, 0,
7420 sizeof(q_params.params.cfc_del));
7421 q_params.params.cfc_del.cid_index = tx_index;
7422 rc = bnx2x_queue_state_change(bp, &q_params);
7423 if (rc)
7424 return rc;
7425 }
7426 /* Stop the primary connection: */
7427 /* ...halt the connection */
7428 q_params.cmd = BNX2X_Q_CMD_HALT;
7429 rc = bnx2x_queue_state_change(bp, &q_params);
7430 if (rc)
7431 return rc;
7432
7433 /* ...terminate the connection */
7434 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
7435 memset(&q_params.params.terminate, 0,
7436 sizeof(q_params.params.terminate));
7437 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
7438 rc = bnx2x_queue_state_change(bp, &q_params);
7439 if (rc)
7440 return rc;
7441 /* ...delete cfc entry */
7442 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
7443 memset(&q_params.params.cfc_del, 0,
7444 sizeof(q_params.params.cfc_del));
7445 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
7446 return bnx2x_queue_state_change(bp, &q_params);
7447}
7448
7449
7450static void bnx2x_reset_func(struct bnx2x *bp)
7451{
7452 int port = BP_PORT(bp);
7453 int func = BP_FUNC(bp);
7454 int i;
7455
7456 /* Disable the function in the FW */
7457 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
7458 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
7459 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
7460 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
7461
7462 /* FP SBs */
7463 for_each_eth_queue(bp, i) {
7464 struct bnx2x_fastpath *fp = &bp->fp[i];
7465 REG_WR8(bp, BAR_CSTRORM_INTMEM +
7466 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
7467 SB_DISABLED);
7468 }
7469
7470#ifdef BCM_CNIC
7471 /* CNIC SB */
7472 REG_WR8(bp, BAR_CSTRORM_INTMEM +
7473 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
7474 SB_DISABLED);
7475#endif
7476 /* SP SB */
7477 REG_WR8(bp, BAR_CSTRORM_INTMEM +
7478 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
7479 SB_DISABLED);
7480
7481 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
7482 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
7483 0);
7484
7485 /* Configure IGU */
7486 if (bp->common.int_block == INT_BLOCK_HC) {
7487 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7488 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7489 } else {
7490 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
7491 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
7492 }
7493
7494#ifdef BCM_CNIC
7495 /* Disable Timer scan */
7496 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7497 /*
7498	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
7499 * complete
7500 */
7501 for (i = 0; i < 200; i++) {
7502 msleep(10);
7503 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7504 break;
7505 }
7506#endif
7507 /* Clear ILT */
7508 bnx2x_clear_func_ilt(bp, func);
7509
7510	/* Timers bug workaround for E2: if this is vnic-3, we need to
7511	 * clear the entire ILT range for these timers.
7512	 */
7513 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
7514 struct ilt_client_info ilt_cli;
7515 /* use dummy TM client */
7516 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7517 ilt_cli.start = 0;
7518 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7519 ilt_cli.client_num = ILT_CLIENT_TM;
7520
7521 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
7522 }
7523
7524	/* this assumes that reset_port() was called before reset_func() */
7525 if (!CHIP_IS_E1x(bp))
7526 bnx2x_pf_disable(bp);
7527
7528 bp->dmae_ready = 0;
7529}
7530
7531static void bnx2x_reset_port(struct bnx2x *bp)
7532{
7533 int port = BP_PORT(bp);
7534 u32 val;
7535
7536 /* Reset physical Link */
7537 bnx2x__link_reset(bp);
7538
7539 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7540
7541 /* Do not rcv packets to BRB */
7542 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7543 /* Do not direct rcv packets that are not for MCP to the BRB */
7544 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7545 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7546
7547 /* Configure AEU */
7548 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7549
7550 msleep(100);
7551 /* Check for BRB port occupancy */
7552 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7553 if (val)
7554 DP(NETIF_MSG_IFDOWN,
7555 "BRB1 is not empty %d blocks are occupied\n", val);
7556
7557 /* TODO: Close Doorbell port? */
7558}
7559
7560static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
7561{
7562 struct bnx2x_func_state_params func_params = {0};
7563
7564 /* Prepare parameters for function state transitions */
7565 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7566
7567 func_params.f_obj = &bp->func_obj;
7568 func_params.cmd = BNX2X_F_CMD_HW_RESET;
7569
7570 func_params.params.hw_init.load_phase = load_code;
7571
7572 return bnx2x_func_state_change(bp, &func_params);
7573}
7574
7575static inline int bnx2x_func_stop(struct bnx2x *bp)
7576{
7577 struct bnx2x_func_state_params func_params = {0};
7578 int rc;
7579
7580 /* Prepare parameters for function state transitions */
7581 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7582 func_params.f_obj = &bp->func_obj;
7583 func_params.cmd = BNX2X_F_CMD_STOP;
7584
7585 /*
7586	 * Try to stop the function the 'good way'. If it fails (in case
7587 * of a parity error during bnx2x_chip_cleanup()) and we are
7588 * not in a debug mode, perform a state transaction in order to
7589 * enable further HW_RESET transaction.
7590 */
7591 rc = bnx2x_func_state_change(bp, &func_params);
7592 if (rc) {
7593#ifdef BNX2X_STOP_ON_ERROR
7594 return rc;
7595#else
7596 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
7597 "transaction\n");
7598 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
7599 return bnx2x_func_state_change(bp, &func_params);
7600#endif
7601 }
7602
7603 return 0;
7604}
7605
7606/**
7607 * bnx2x_send_unload_req - request unload mode from the MCP.
7608 *
7609 * @bp: driver handle
7610 * @unload_mode: requested function's unload mode
7611 *
7612 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
7613 */
7614u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
7615{
7616 u32 reset_code = 0;
7617 int port = BP_PORT(bp);
7618
7619 /* Select the UNLOAD request mode */
7620 if (unload_mode == UNLOAD_NORMAL)
7621 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7622
7623 else if (bp->flags & NO_WOL_FLAG)
7624 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7625
7626 else if (bp->wol) {
7627 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7628 u8 *mac_addr = bp->dev->dev_addr;
7629 u32 val;
7630 u16 pmc;
7631
7632 /* The mac address is written to entries 1-4 to
7633 * preserve entry 0 which is used by the PMF
7634 */
7635 u8 entry = (BP_VN(bp) + 1)*8;
7636
7637 val = (mac_addr[0] << 8) | mac_addr[1];
7638 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7639
7640 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7641 (mac_addr[4] << 8) | mac_addr[5];
7642 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
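		/* Each MAC_MATCH entry is 8 bytes (two 32-bit registers):
		 * e.g. for VN 0, entry == 8, so the top two MAC octets land
		 * at byte offset 8 and the remaining four at offset 12.
		 */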
7643
7644 /* Enable the PME and clear the status */
7645 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
7646 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
7647 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
7648
7649 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7650
7651 } else
7652 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7653
7654 /* Send the request to the MCP */
7655 if (!BP_NOMCP(bp))
7656 reset_code = bnx2x_fw_command(bp, reset_code, 0);
7657 else {
7658 int path = BP_PATH(bp);
7659
7660 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7661 "%d, %d, %d\n",
7662 path, load_count[path][0], load_count[path][1],
7663 load_count[path][2]);
7664 load_count[path][0]--;
7665 load_count[path][1 + port]--;
7666 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7667 "%d, %d, %d\n",
7668 path, load_count[path][0], load_count[path][1],
7669 load_count[path][2]);
7670 if (load_count[path][0] == 0)
7671 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7672 else if (load_count[path][1 + port] == 0)
7673 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7674 else
7675 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7676 }
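	/* In the no-MCP case above, load_count[path][0] counts functions
	 * loaded on the whole path and load_count[path][1 + port] those on
	 * this port; whichever counter reaches zero first decides whether a
	 * COMMON, PORT or FUNCTION level unload is emulated.
	 */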
7677
7678 return reset_code;
7679}
7680
7681/**
7682 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
7683 *
7684 * @bp: driver handle
7685 */
7686void bnx2x_send_unload_done(struct bnx2x *bp)
7687{
7688 /* Report UNLOAD_DONE to MCP */
7689 if (!BP_NOMCP(bp))
7690 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7691}
7692
7693static inline int bnx2x_func_wait_started(struct bnx2x *bp)
7694{
7695 int tout = 50;
7696 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
7697
7698 if (!bp->port.pmf)
7699 return 0;
7700
7701	/*
7702	 * (assumption: no attention from the MCP at this stage)
7703	 * The PMF is probably in the middle of a TXdisable/enable transaction:
7704	 * 1. Sync the ISR for the default SB
7705	 * 2. Sync the SP queue - this guarantees that attention handling started
7706	 * 3. Wait until the TXdisable/enable transaction completes
7707	 *
7708	 * 1+2 guarantee that if a DCBx attention was scheduled, it has already
7709	 * changed the pending bit of the transaction from STARTED-->TX_STOPPED;
7710	 * if we already received the completion, the state is TX_STOPPED.
7711	 * The state returns to STARTED after the TX_STOPPED-->STARTED
7712	 * transaction completes.
7713	 */
7714
7715 /* make sure default SB ISR is done */
7716 if (msix)
7717 synchronize_irq(bp->msix_table[0].vector);
7718 else
7719 synchronize_irq(bp->pdev->irq);
7720
7721 flush_workqueue(bnx2x_wq);
7722
7723 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
7724 BNX2X_F_STATE_STARTED && tout--)
7725 msleep(20);
7726
7727 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
7728 BNX2X_F_STATE_STARTED) {
7729#ifdef BNX2X_STOP_ON_ERROR
7730 return -EBUSY;
7731#else
7732 /*
7733 * Failed to complete the transaction in a "good way"
7734 * Force both transactions with CLR bit
7735 */
7736 struct bnx2x_func_state_params func_params = {0};
7737
7738		DP(BNX2X_MSG_SP, "Unexpected function state! "
7739		   "Forcing STARTED-->TX_STOPPED-->STARTED\n");
7740
7741 func_params.f_obj = &bp->func_obj;
7742 __set_bit(RAMROD_DRV_CLR_ONLY,
7743 &func_params.ramrod_flags);
7744
7745		/* STARTED-->TX_STOPPED */
7746 func_params.cmd = BNX2X_F_CMD_TX_STOP;
7747 bnx2x_func_state_change(bp, &func_params);
7748
7749		/* TX_STOPPED-->STARTED */
7750 func_params.cmd = BNX2X_F_CMD_TX_START;
7751 return bnx2x_func_state_change(bp, &func_params);
7752#endif
7753 }
7754
7755 return 0;
7756}
7757
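/**
 * bnx2x_chip_cleanup - bring the chip down in an orderly fashion.
 *
 * @bp:		driver handle
 * @unload_mode: requested unload mode, forwarded to the MCP
 *
 * Drains the tx queues, deletes the MAC and multicast configuration,
 * stops Rx, sends UNLOAD_REQ to the MCP, stops all queues and the
 * function, and finally resets the HW according to the reset_code
 * returned by the MCP.
 */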
7758void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7759{
7760 int port = BP_PORT(bp);
7761 int i, rc = 0;
7762 u8 cos;
7763 struct bnx2x_mcast_ramrod_params rparam = {0};
7764 u32 reset_code;
7765
7766 /* Wait until tx fastpath tasks complete */
7767 for_each_tx_queue(bp, i) {
7768 struct bnx2x_fastpath *fp = &bp->fp[i];
7769
7770 for_each_cos_in_tx_queue(fp, cos)
7771 rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
7772#ifdef BNX2X_STOP_ON_ERROR
7773 if (rc)
7774 return;
7775#endif
7776 }
7777
7778 /* Give HW time to discard old tx messages */
7779 usleep_range(1000, 1000);
7780
7781 /* Clean all ETH MACs */
7782 rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
7783 if (rc < 0)
7784 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
7785
7786 /* Clean up UC list */
7787 rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
7788 true);
7789 if (rc < 0)
7790 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: "
7791 "%d\n", rc);
7792
7793 /* Disable LLH */
7794 if (!CHIP_IS_E1(bp))
7795 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7796
7797 /* Set "drop all" (stop Rx).
7798 * We need to take a netif_addr_lock() here in order to prevent
7799 * a race between the completion code and this code.
7800 */
7801 netif_addr_lock_bh(bp->dev);
7802 /* Schedule the rx_mode command */
7803 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
7804 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
7805 else
7806 bnx2x_set_storm_rx_mode(bp);
7807
7808 /* Cleanup multicast configuration */
7809 rparam.mcast_obj = &bp->mcast_obj;
7810 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
7811 if (rc < 0)
7812 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
7813
7814 netif_addr_unlock_bh(bp->dev);
7815
7816
7817
7818	/*
7819	 * Send the UNLOAD_REQUEST to the MCP. The returned reset_code
7820	 * tells whether this function should perform a FUNC, PORT or
7821	 * COMMON HW reset.
7822	 */
7823 reset_code = bnx2x_send_unload_req(bp, unload_mode);
7824
7825 /*
7826 * (assumption: No Attention from MCP at this stage)
7827 * PMF probably in the middle of TXdisable/enable transaction
7828 */
7829 rc = bnx2x_func_wait_started(bp);
7830 if (rc) {
7831 BNX2X_ERR("bnx2x_func_wait_started failed\n");
7832#ifdef BNX2X_STOP_ON_ERROR
7833 return;
7834#endif
7835 }
7836
7837 /* Close multi and leading connections
7838 * Completions for ramrods are collected in a synchronous way
7839 */
7840 for_each_queue(bp, i)
7841 if (bnx2x_stop_queue(bp, i))
7842#ifdef BNX2X_STOP_ON_ERROR
7843 return;
7844#else
7845 goto unload_error;
7846#endif
7847	/* If the SP settings didn't get completed so far - something
7848	 * very wrong has happened.
7849	 */
7850 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
7851		BNX2X_ERR("Common slow path ramrods got stuck!\n");
7852
7853#ifndef BNX2X_STOP_ON_ERROR
7854unload_error:
7855#endif
7856 rc = bnx2x_func_stop(bp);
7857 if (rc) {
7858 BNX2X_ERR("Function stop failed!\n");
7859#ifdef BNX2X_STOP_ON_ERROR
7860 return;
7861#endif
7862 }
7863
7864 /* Disable HW interrupts, NAPI */
7865 bnx2x_netif_stop(bp, 1);
7866
7867 /* Release IRQs */
7868 bnx2x_free_irq(bp);
7869
7870 /* Reset the chip */
7871 rc = bnx2x_reset_hw(bp, reset_code);
7872 if (rc)
7873 BNX2X_ERR("HW_RESET failed\n");
7874
7875
7876 /* Report UNLOAD_DONE to MCP */
7877 bnx2x_send_unload_done(bp);
7878}
7879
7880void bnx2x_disable_close_the_gate(struct bnx2x *bp)
7881{
7882 u32 val;
7883
7884 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7885
7886 if (CHIP_IS_E1(bp)) {
7887 int port = BP_PORT(bp);
7888 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7889 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7890
7891 val = REG_RD(bp, addr);
7892 val &= ~(0x300);
7893 REG_WR(bp, addr, val);
7894 } else {
7895 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7896 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7897 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7898 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7899 }
7900}
7901
7902/* Close gates #2 (PXP internal writes), #3 (HC/IGU interrupts) and #4 (doorbells): */
7903static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7904{
7905 u32 val;
7906
7907 /* Gates #2 and #4a are closed/opened for "not E1" only */
7908 if (!CHIP_IS_E1(bp)) {
7909 /* #4 */
7910 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
7911 /* #2 */
7912 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
7913 }
7914
7915 /* #3 */
7916 if (CHIP_IS_E1x(bp)) {
7917 /* Prevent interrupts from HC on both ports */
7918 val = REG_RD(bp, HC_REG_CONFIG_1);
7919 REG_WR(bp, HC_REG_CONFIG_1,
7920 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
7921 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
7922
7923 val = REG_RD(bp, HC_REG_CONFIG_0);
7924 REG_WR(bp, HC_REG_CONFIG_0,
7925 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
7926 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
7927 } else {
7928		/* Prevent incoming interrupts in IGU */
7929 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
7930
7931 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
7932 (!close) ?
7933 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
7934 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
7935 }
7936
7937 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7938 close ? "closing" : "opening");
7939 mmiowb();
7940}
7941
7942#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7943
7944static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7945{
7946	/* Save the current `magic' bit and set it in the CLP mailbox */
7947 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7948 *magic_val = val & SHARED_MF_CLP_MAGIC;
7949 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7950}
7951
7952/**
7953 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
7954 *
7955 * @bp: driver handle
7956 * @magic_val: old value of the `magic' bit.
7957 */
7958static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7959{
7960 /* Restore the `magic' bit value... */
7961 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7962 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7963 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7964}
7965
7966/**
7967 * bnx2x_reset_mcp_prep - prepare for MCP reset.
7968 *
7969 * @bp: driver handle
7970 * @magic_val: old value of 'magic' bit.
7971 *
7972 * Takes care of CLP configurations.
7973 */
7974static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7975{
7976 u32 shmem;
7977 u32 validity_offset;
7978
7979 DP(NETIF_MSG_HW, "Starting\n");
7980
7981 /* Set `magic' bit in order to save MF config */
7982 if (!CHIP_IS_E1(bp))
7983 bnx2x_clp_reset_prep(bp, magic_val);
7984
7985 /* Get shmem offset */
7986 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7987 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7988
7989 /* Clear validity map flags */
7990 if (shmem > 0)
7991 REG_WR(bp, shmem + validity_offset, 0);
7992}
7993
7994#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7995#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7996
7997/**
7998 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
7999 *
8000 * @bp: driver handle
8001 */
8002static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8003{
8004 /* special handling for emulation and FPGA,
8005 wait 10 times longer */
8006 if (CHIP_REV_IS_SLOW(bp))
8007 msleep(MCP_ONE_TIMEOUT*10);
8008 else
8009 msleep(MCP_ONE_TIMEOUT);
8010}
8011
8012/*
8013 * initializes bp->common.shmem_base and waits for validity signature to appear
8014 */
8015static int bnx2x_init_shmem(struct bnx2x *bp)
8016{
8017 int cnt = 0;
8018 u32 val = 0;
8019
8020 do {
8021 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8022 if (bp->common.shmem_base) {
8023 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8024 if (val & SHR_MEM_VALIDITY_MB)
8025 return 0;
8026 }
8027
8028 bnx2x_mcp_wait_one(bp);
8029
8030 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
8031
8032 BNX2X_ERR("BAD MCP validity signature\n");
8033
8034 return -ENODEV;
8035}
8036
8037static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8038{
8039 int rc = bnx2x_init_shmem(bp);
8040
8041 /* Restore the `magic' bit value */
8042 if (!CHIP_IS_E1(bp))
8043 bnx2x_clp_reset_done(bp, magic_val);
8044
8045 return rc;
8046}
8047
8048static void bnx2x_pxp_prep(struct bnx2x *bp)
8049{
8050 if (!CHIP_IS_E1(bp)) {
8051 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8052 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8053 mmiowb();
8054 }
8055}
8056
8057/*
8058 * Reset the whole chip except for:
8059 * - PCIE core
8060 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8061 * one reset bit)
8062 * - IGU
8063 * - MISC (including AEU)
8064 * - GRC
8065 * - RBCN, RBCP
8066 */
8067static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
8068{
8069 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8070 u32 global_bits2, stay_reset2;
8071
8072 /*
8073 * Bits that have to be set in reset_mask2 if we want to reset 'global'
8074 * (per chip) blocks.
8075 */
8076 global_bits2 =
8077 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
8078 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
8079
8080 /* Don't reset the following blocks */
8081 not_reset_mask1 =
8082 MISC_REGISTERS_RESET_REG_1_RST_HC |
8083 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8084 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8085
8086 not_reset_mask2 =
8087 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
8088 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8089 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8090 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8091 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8092 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8093 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8094 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
8095 MISC_REGISTERS_RESET_REG_2_RST_ATC |
8096 MISC_REGISTERS_RESET_REG_2_PGLC;
8097
8098 /*
8099 * Keep the following blocks in reset:
8100 * - all xxMACs are handled by the bnx2x_link code.
8101 */
8102 stay_reset2 =
8103 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
8104 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
8105 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
8106 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
8107 MISC_REGISTERS_RESET_REG_2_UMAC0 |
8108 MISC_REGISTERS_RESET_REG_2_UMAC1 |
8109 MISC_REGISTERS_RESET_REG_2_XMAC |
8110 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
8111
8112 /* Full reset masks according to the chip */
8113 reset_mask1 = 0xffffffff;
8114
8115 if (CHIP_IS_E1(bp))
8116 reset_mask2 = 0xffff;
8117 else if (CHIP_IS_E1H(bp))
8118 reset_mask2 = 0x1ffff;
8119 else if (CHIP_IS_E2(bp))
8120 reset_mask2 = 0xfffff;
8121 else /* CHIP_IS_E3 */
8122 reset_mask2 = 0x3ffffff;
8123
8124 /* Don't reset global blocks unless we need to */
8125 if (!global)
8126 reset_mask2 &= ~global_bits2;
8127
8128 /*
8129 * In case of attention in the QM, we need to reset PXP
8130 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
8131 * because otherwise QM reset would release 'close the gates' shortly
8132 * before resetting the PXP, then the PSWRQ would send a write
8133 * request to PGLUE. Then when PXP is reset, PGLUE would try to
8134 * read the payload data from PSWWR, but PSWWR would not
8135 * respond. The write queue in PGLUE would stuck, dmae commands
8136 * would not return. Therefore it's important to reset the second
8137 * reset register (containing the
8138 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
8139 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
8140 * bit).
8141 */
8142 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8143 reset_mask2 & (~not_reset_mask2));
8144
8145 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8146 reset_mask1 & (~not_reset_mask1));
8147
8148 barrier();
8149 mmiowb();
8150
8151 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
8152 reset_mask2 & (~stay_reset2));
8153
8154 barrier();
8155 mmiowb();
8156
8157 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8158 mmiowb();
8159}
8160
8161/**
8162 * bnx2x_er_poll_igu_vq - poll for the pending writes bit
8163 *
8164 * @bp:	driver handle
8165 *
8166 * The pending writes bit should get cleared in no more than 1s.
8167 * Returns 0 if the pending writes bit gets cleared, -EBUSY
8168 * otherwise.
8169 */
8170static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
8171{
8172 u32 cnt = 1000;
8173 u32 pend_bits = 0;
8174
8175 do {
8176 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
8177
8178 if (pend_bits == 0)
8179 break;
8180
8181 usleep_range(1000, 1000);
8182 } while (cnt-- > 0);
8183
8184	if (pend_bits) {	/* cnt is u32; a (cnt <= 0) test would never fire */
8185 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
8186 pend_bits);
8187 return -EBUSY;
8188 }
8189
8190 return 0;
8191}
8192
8193static int bnx2x_process_kill(struct bnx2x *bp, bool global)
8194{
8195 int cnt = 1000;
8196 u32 val = 0;
8197 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8198
8199
8200 /* Empty the Tetris buffer, wait for 1s */
8201 do {
8202 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8203 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8204 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8205 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8206 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8207 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8208 ((port_is_idle_0 & 0x1) == 0x1) &&
8209 ((port_is_idle_1 & 0x1) == 0x1) &&
8210 (pgl_exp_rom2 == 0xffffffff))
8211 break;
8212 usleep_range(1000, 1000);
8213 } while (cnt-- > 0);
8214
8215 if (cnt <= 0) {
8216 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8217 " are still"
8218 " outstanding read requests after 1s!\n");
8219 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8220 " port_is_idle_0=0x%08x,"
8221 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8222 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8223 pgl_exp_rom2);
8224 return -EAGAIN;
8225 }
8226
8227 barrier();
8228
8229 /* Close gates #2, #3 and #4 */
8230 bnx2x_set_234_gates(bp, true);
8231
8232 /* Poll for IGU VQs for 57712 and newer chips */
8233 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
8234 return -EAGAIN;
8235
8236
8237 /* TBD: Indicate that "process kill" is in progress to MCP */
8238
8239 /* Clear "unprepared" bit */
8240 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8241 barrier();
8242
8243 /* Make sure all is written to the chip before the reset */
8244 mmiowb();
8245
8246 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8247 * PSWHST, GRC and PSWRD Tetris buffer.
8248 */
8249 usleep_range(1000, 1000);
8250
8251	/* Prepare for chip reset: */
8252 /* MCP */
8253 if (global)
8254 bnx2x_reset_mcp_prep(bp, &val);
8255
8256 /* PXP */
8257 bnx2x_pxp_prep(bp);
8258 barrier();
8259
8260 /* reset the chip */
8261 bnx2x_process_kill_chip_reset(bp, global);
8262 barrier();
8263
8264 /* Recover after reset: */
8265 /* MCP */
8266 if (global && bnx2x_reset_mcp_comp(bp, val))
8267 return -EAGAIN;
8268
8269 /* TBD: Add resetting the NO_MCP mode DB here */
8270
8271 /* PXP */
8272 bnx2x_pxp_prep(bp);
8273
8274 /* Open the gates #2, #3 and #4 */
8275 bnx2x_set_234_gates(bp, false);
8276
8277	/* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
8278	 * reset state, re-enable attentions. */
8279
8280 return 0;
8281}
8282
8283int bnx2x_leader_reset(struct bnx2x *bp)
8284{
8285 int rc = 0;
8286 bool global = bnx2x_reset_is_global(bp);
8287
8288 /* Try to recover after the failure */
8289 if (bnx2x_process_kill(bp, global)) {
8290		netdev_err(bp->dev, "Something bad happened on engine %d!\n",
8291			   BP_PATH(bp));
8292 rc = -EAGAIN;
8293 goto exit_leader_reset;
8294 }
8295
8296 /*
8297	 * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
8298 * state.
8299 */
8300 bnx2x_set_reset_done(bp);
8301 if (global)
8302 bnx2x_clear_reset_global(bp);
8303
8304exit_leader_reset:
8305 bp->is_leader = 0;
8306 bnx2x_release_leader_lock(bp);
8307 smp_mb();
8308 return rc;
8309}
8310
8311static inline void bnx2x_recovery_failed(struct bnx2x *bp)
8312{
8313 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
8314
8315 /* Disconnect this device */
8316 netif_device_detach(bp->dev);
8317
8318 /*
8319	 * Block ifup for all functions on this engine until "process kill"
8320 * or power cycle.
8321 */
8322 bnx2x_set_reset_in_progress(bp);
8323
8324 /* Shut down the power */
8325 bnx2x_set_power_state(bp, PCI_D3hot);
8326
8327 bp->recovery_state = BNX2X_RECOVERY_FAILED;
8328
8329 smp_mb();
8330}
8331
8332/*
8333 * Assumption: runs under rtnl lock. This together with the fact
8334 * that it is called only from bnx2x_sp_rtnl_task() ensures that it
8335 * will never be called when netif_running(bp->dev) is false.
8336 */
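/*
 * Recovery state flow implemented by the switch below (a summary; the
 * code is authoritative):
 *
 *   BNX2X_RECOVERY_INIT: grab the LEADER_LOCK if possible, unload the
 *       NIC and move to BNX2X_RECOVERY_WAIT.
 *   BNX2X_RECOVERY_WAIT (leader): wait until all other functions are
 *       down, then bnx2x_leader_reset() and resign leadership.
 *   BNX2X_RECOVERY_WAIT (non-leader): take over leadership if the lock
 *       is free, otherwise wait for the reset to complete and reload,
 *       ending in BNX2X_RECOVERY_DONE.
 *   A failure at any point ends in bnx2x_recovery_failed().
 */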
8337static void bnx2x_parity_recover(struct bnx2x *bp)
8338{
8339 bool global = false;
8340
8341 DP(NETIF_MSG_HW, "Handling parity\n");
8342 while (1) {
8343 switch (bp->recovery_state) {
8344 case BNX2X_RECOVERY_INIT:
8345 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8346 bnx2x_chk_parity_attn(bp, &global, false);
8347
8348 /* Try to get a LEADER_LOCK HW lock */
8349 if (bnx2x_trylock_leader_lock(bp)) {
8350 bnx2x_set_reset_in_progress(bp);
8351 /*
8352 * Check if there is a global attention and if
8353 * there was a global attention, set the global
8354 * reset bit.
8355 */
8356
8357 if (global)
8358 bnx2x_set_reset_global(bp);
8359
8360 bp->is_leader = 1;
8361 }
8362
8363 /* Stop the driver */
8364 /* If interface has been removed - break */
8365 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8366 return;
8367
8368 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8369
8370 /*
8371 * Reset MCP command sequence number and MCP mail box
8372 * sequence as we are going to reset the MCP.
8373 */
8374 if (global) {
8375 bp->fw_seq = 0;
8376 bp->fw_drv_pulse_wr_seq = 0;
8377 }
8378
8379 /* Ensure "is_leader", MCP command sequence and
8380 * "recovery_state" update values are seen on other
8381 * CPUs.
8382 */
8383 smp_mb();
8384 break;
8385
8386 case BNX2X_RECOVERY_WAIT:
8387 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8388 if (bp->is_leader) {
8389 int other_engine = BP_PATH(bp) ? 0 : 1;
8390 u32 other_load_counter =
8391 bnx2x_get_load_cnt(bp, other_engine);
8392 u32 load_counter =
8393 bnx2x_get_load_cnt(bp, BP_PATH(bp));
8394 global = bnx2x_reset_is_global(bp);
8395
8396 /*
8397 * In case of a parity in a global block, let
8398 * the first leader that performs a
8399 * leader_reset() reset the global blocks in
8400 * order to clear global attentions. Otherwise
8401			 * the gates will remain closed for that
8402 * engine.
8403 */
8404 if (load_counter ||
8405 (global && other_load_counter)) {
8406 /* Wait until all other functions get
8407 * down.
8408 */
8409 schedule_delayed_work(&bp->sp_rtnl_task,
8410 HZ/10);
8411 return;
8412 } else {
8413 /* If all other functions got down -
8414 * try to bring the chip back to
8415 * normal. In any case it's an exit
8416 * point for a leader.
8417 */
8418 if (bnx2x_leader_reset(bp)) {
8419 bnx2x_recovery_failed(bp);
8420 return;
8421 }
8422
8423				/* If we are here, it means that the
8424				 * leader has succeeded and doesn't
8425				 * want to be a leader any more. Try
8426				 * to continue as a non-leader.
8427 */
8428 break;
8429 }
8430 } else { /* non-leader */
8431 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
8432				/* Try to get a LEADER_LOCK HW lock,
8433				 * since a former leader may have
8434				 * been unloaded by the user or
8435				 * released leadership for another
8436				 * reason.
8437 */
8438 if (bnx2x_trylock_leader_lock(bp)) {
8439 /* I'm a leader now! Restart a
8440 * switch case.
8441 */
8442 bp->is_leader = 1;
8443 break;
8444 }
8445
8446 schedule_delayed_work(&bp->sp_rtnl_task,
8447 HZ/10);
8448 return;
8449
8450 } else {
8451 /*
8452 * If there was a global attention, wait
8453 * for it to be cleared.
8454 */
8455 if (bnx2x_reset_is_global(bp)) {
8456 schedule_delayed_work(
8457 &bp->sp_rtnl_task,
8458 HZ/10);
8459 return;
8460 }
8461
8462 if (bnx2x_nic_load(bp, LOAD_NORMAL))
8463 bnx2x_recovery_failed(bp);
8464 else {
8465 bp->recovery_state =
8466 BNX2X_RECOVERY_DONE;
8467 smp_mb();
8468 }
8469
8470 return;
8471 }
8472 }
8473 default:
8474 return;
8475 }
8476 }
8477}
8478
8479/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
8480 * scheduled on a general queue in order to prevent a deadlock.
8481 */
8482static void bnx2x_sp_rtnl_task(struct work_struct *work)
8483{
8484 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
8485
8486 rtnl_lock();
8487
8488 if (!netif_running(bp->dev))
8489 goto sp_rtnl_exit;
8490
8491 /* if stop on error is defined no recovery flows should be executed */
8492#ifdef BNX2X_STOP_ON_ERROR
8493 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined "
8494 "so reset not done to allow debug dump,\n"
8495 "you will need to reboot when done\n");
8496 goto sp_rtnl_not_reset;
8497#endif
8498
8499 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
8500 /*
8501 * Clear all pending SP commands as we are going to reset the
8502 * function anyway.
8503 */
8504 bp->sp_rtnl_state = 0;
8505 smp_mb();
8506
8507 bnx2x_parity_recover(bp);
8508
8509 goto sp_rtnl_exit;
8510 }
8511
8512 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
8513 /*
8514 * Clear all pending SP commands as we are going to reset the
8515 * function anyway.
8516 */
8517 bp->sp_rtnl_state = 0;
8518 smp_mb();
8519
8520 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8521 bnx2x_nic_load(bp, LOAD_NORMAL);
8522
8523 goto sp_rtnl_exit;
8524 }
8525#ifdef BNX2X_STOP_ON_ERROR
8526sp_rtnl_not_reset:
8527#endif
8528 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
8529 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
8530
8531sp_rtnl_exit:
8532 rtnl_unlock();
8533}
8534
8535/* end of nic load/unload */
8536
8537static void bnx2x_period_task(struct work_struct *work)
8538{
8539 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
8540
8541 if (!netif_running(bp->dev))
8542 goto period_task_exit;
8543
8544 if (CHIP_REV_IS_SLOW(bp)) {
8545 BNX2X_ERR("period task called on emulation, ignoring\n");
8546 goto period_task_exit;
8547 }
8548
8549 bnx2x_acquire_phy_lock(bp);
8550 /*
8551 * The barrier is needed to ensure the ordering between the writing to
8552 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
8553 * the reading here.
8554 */
8555 smp_mb();
8556 if (bp->port.pmf) {
8557 bnx2x_period_func(&bp->link_params, &bp->link_vars);
8558
8559 /* Re-queue task in 1 sec */
8560 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
8561 }
8562
8563 bnx2x_release_phy_lock(bp);
8564period_task_exit:
8565 return;
8566}
8567
8568/*
8569 * Init service functions
8570 */
8571
8572static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
8573{
8574 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
8575 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
8576 return base + (BP_ABS_FUNC(bp)) * stride;
8577}
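/*
 * Worked example (illustrative): the subtraction above assumes the
 * per-function pretend registers are laid out at a uniform stride, so
 * for absolute function 2 this returns PXP2_REG_PGL_PRETEND_FUNC_F0 +
 * 2 * (PXP2_REG_PGL_PRETEND_FUNC_F1 - PXP2_REG_PGL_PRETEND_FUNC_F0),
 * i.e. function 2's pretend register.
 */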
8578
8579static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
8580{
8581 u32 reg = bnx2x_get_pretend_reg(bp);
8582
8583 /* Flush all outstanding writes */
8584 mmiowb();
8585
8586 /* Pretend to be function 0 */
8587 REG_WR(bp, reg, 0);
8588 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
8589
8590 /* From now we are in the "like-E1" mode */
8591 bnx2x_int_disable(bp);
8592
8593 /* Flush all outstanding writes */
8594 mmiowb();
8595
8596 /* Restore the original function */
8597 REG_WR(bp, reg, BP_ABS_FUNC(bp));
8598 REG_RD(bp, reg);
8599}
8600
8601static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
8602{
8603 if (CHIP_IS_E1(bp))
8604 bnx2x_int_disable(bp);
8605 else
8606 bnx2x_undi_int_disable_e1h(bp);
8607}
8608
8609static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8610{
8611 u32 val;
8612
8613 /* Check if there is any driver already loaded */
8614 val = REG_RD(bp, MISC_REG_UNPREPARED);
8615 if (val == 0x1) {
8616
8617 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
8618 /*
8619		 * Check if it is the UNDI driver:
8620		 * the UNDI driver initializes the CID offset for the normal bell to 0x7
8621 */
8622 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8623 if (val == 0x7) {
8624 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8625 /* save our pf_num */
8626 int orig_pf_num = bp->pf_num;
8627 int port;
8628 u32 swap_en, swap_val, value;
8629
8630 /* clear the UNDI indication */
8631 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8632
8633 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8634
8635 /* try unload UNDI on port 0 */
8636 bp->pf_num = 0;
8637 bp->fw_seq =
8638 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
8639 DRV_MSG_SEQ_NUMBER_MASK);
8640 reset_code = bnx2x_fw_command(bp, reset_code, 0);
8641
8642 /* if UNDI is loaded on the other port */
8643 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8644
8645 /* send "DONE" for previous unload */
8646 bnx2x_fw_command(bp,
8647 DRV_MSG_CODE_UNLOAD_DONE, 0);
8648
8649 /* unload UNDI on port 1 */
8650 bp->pf_num = 1;
8651 bp->fw_seq =
8652 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
8653 DRV_MSG_SEQ_NUMBER_MASK);
8654 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8655
8656 bnx2x_fw_command(bp, reset_code, 0);
8657 }
8658
8659 bnx2x_undi_int_disable(bp);
8660 port = BP_PORT(bp);
8661
8662 /* close input traffic and wait for it */
8663 /* Do not rcv packets to BRB */
8664 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
8665 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8666 /* Do not direct rcv packets that are not for MCP to
8667 * the BRB */
8668 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8669 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8670 /* clear AEU */
8671 REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8672 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8673 msleep(10);
8674
8675 /* save NIG port swap info */
8676 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8677 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8678 /* reset device */
8679 REG_WR(bp,
8680 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8681 0xd3ffffff);
8682
8683 value = 0x1400;
8684 if (CHIP_IS_E3(bp)) {
8685 value |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
8686 value |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
8687 }
8688
8689 REG_WR(bp,
8690 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8691 value);
8692
8693 /* take the NIG out of reset and restore swap values */
8694 REG_WR(bp,
8695 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8696 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8697 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8698 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8699
8700 /* send unload done to the MCP */
8701 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
8702
8703 /* restore our func and fw_seq */
8704 bp->pf_num = orig_pf_num;
8705 bp->fw_seq =
8706 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
8707 DRV_MSG_SEQ_NUMBER_MASK);
8708 }
8709
8710 /* now it's safe to release the lock */
8711 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
8712 }
8713}
8714
8715static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8716{
8717 u32 val, val2, val3, val4, id;
8718 u16 pmc;
8719
8720 /* Get the chip revision id and number. */
8721 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8722 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8723 id = ((val & 0xffff) << 16);
8724 val = REG_RD(bp, MISC_REG_CHIP_REV);
8725 id |= ((val & 0xf) << 12);
8726 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8727 id |= ((val & 0xff) << 4);
8728 val = REG_RD(bp, MISC_REG_BOND_ID);
8729 id |= (val & 0xf);
8730 bp->common.chip_id = id;
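	/*
	 * Example of the packing above (values are illustrative only):
	 * chip num 0xabcd, rev 0x1, metal 0x02 and bond_id 0x3 yield
	 * chip_id = 0xabcd1023.
	 */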
8731
8732 /* Set doorbell size */
8733 bp->db_size = (1 << BNX2X_DB_SHIFT);
8734
8735 if (!CHIP_IS_E1x(bp)) {
8736 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
8737 if ((val & 1) == 0)
8738 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
8739 else
8740 val = (val >> 1) & 1;
8741 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
8742 "2_PORT_MODE");
8743 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
8744 CHIP_2_PORT_MODE;
8745
8746 if (CHIP_MODE_IS_4_PORT(bp))
8747 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
8748 else
8749 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
8750 } else {
8751 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
8752 bp->pfid = bp->pf_num; /* 0..7 */
8753 }
8754
8755 bp->link_params.chip_id = bp->common.chip_id;
8756 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8757
8758 val = (REG_RD(bp, 0x2874) & 0x55);
8759 if ((bp->common.chip_id & 0x1) ||
8760 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8761 bp->flags |= ONE_PORT_FLAG;
8762 BNX2X_DEV_INFO("single port device\n");
8763 }
8764
8765 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8766 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
8767 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8768 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8769 bp->common.flash_size, bp->common.flash_size);
8770
8771 bnx2x_init_shmem(bp);
8772
8773
8774
8775 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
8776 MISC_REG_GENERIC_CR_1 :
8777 MISC_REG_GENERIC_CR_0));
8778
8779 bp->link_params.shmem_base = bp->common.shmem_base;
8780 bp->link_params.shmem2_base = bp->common.shmem2_base;
8781 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8782 bp->common.shmem_base, bp->common.shmem2_base);
8783
8784 if (!bp->common.shmem_base) {
8785 BNX2X_DEV_INFO("MCP not active\n");
8786 bp->flags |= NO_MCP_FLAG;
8787 return;
8788 }
8789
8790 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8791 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8792
8793 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8794 SHARED_HW_CFG_LED_MODE_MASK) >>
8795 SHARED_HW_CFG_LED_MODE_SHIFT);
8796
8797 bp->link_params.feature_config_flags = 0;
8798 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8799 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8800 bp->link_params.feature_config_flags |=
8801 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8802 else
8803 bp->link_params.feature_config_flags &=
8804 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8805
8806 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8807 bp->common.bc_ver = val;
8808 BNX2X_DEV_INFO("bc_ver %X\n", val);
8809 if (val < BNX2X_BC_VER) {
8810		/* For now only warn;
8811		 * later we might need to enforce this. */
8812 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
8813 "please upgrade BC\n", BNX2X_BC_VER, val);
8814 }
8815 bp->link_params.feature_config_flags |=
8816 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
8817 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8818
8819 bp->link_params.feature_config_flags |=
8820 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
8821 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
8822
8823 bp->link_params.feature_config_flags |=
8824 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
8825 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
8826
8827 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8828 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8829
8830 BNX2X_DEV_INFO("%sWoL capable\n",
8831 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8832
8833 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8834 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8835 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8836 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8837
8838 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
8839 val, val2, val3, val4);
8840}
8841
8842#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
8843#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
8844
8845static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
8846{
8847 int pfid = BP_FUNC(bp);
8848 int igu_sb_id;
8849 u32 val;
8850 u8 fid, igu_sb_cnt = 0;
8851
8852 bp->igu_base_sb = 0xff;
8853 if (CHIP_INT_MODE_IS_BC(bp)) {
8854 int vn = BP_VN(bp);
8855 igu_sb_cnt = bp->igu_sb_cnt;
8856 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
8857 FP_SB_MAX_E1x;
8858
8859 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
8860 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
8861
8862 return;
8863 }
8864
8865 /* IGU in normal mode - read CAM */
8866 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
8867 igu_sb_id++) {
8868 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
8869 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
8870 continue;
8871 fid = IGU_FID(val);
8872 if ((fid & IGU_FID_ENCODE_IS_PF)) {
8873 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
8874 continue;
8875 if (IGU_VEC(val) == 0)
8876 /* default status block */
8877 bp->igu_dsb_id = igu_sb_id;
8878 else {
8879 if (bp->igu_base_sb == 0xff)
8880 bp->igu_base_sb = igu_sb_id;
8881 igu_sb_cnt++;
8882 }
8883 }
8884 }
8885
8886#ifdef CONFIG_PCI_MSI
8887 /*
8888	 * It's expected that the number of CAM entries for this function is equal
8889 * to the number evaluated based on the MSI-X table size. We want a
8890 * harsh warning if these values are different!
8891 */
8892 WARN_ON(bp->igu_sb_cnt != igu_sb_cnt);
8893#endif
8894
8895 if (igu_sb_cnt == 0)
8896 BNX2X_ERR("CAM configuration error\n");
8897}
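/*
 * In short: in IGU backward-compatible mode the SB layout is derived
 * arithmetically from the function/vn number, while in normal mode the
 * IGU CAM is scanned and every valid entry owned by this PF contributes
 * either the default status block (vector 0) or one fast-path SB.
 */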
8898
8899static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8900 u32 switch_cfg)
8901{
8902 int cfg_size = 0, idx, port = BP_PORT(bp);
8903
8904 /* Aggregation of supported attributes of all external phys */
8905 bp->port.supported[0] = 0;
8906 bp->port.supported[1] = 0;
8907 switch (bp->link_params.num_phys) {
8908 case 1:
8909 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
8910 cfg_size = 1;
8911 break;
8912 case 2:
8913 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
8914 cfg_size = 1;
8915 break;
8916 case 3:
8917 if (bp->link_params.multi_phy_config &
8918 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
8919 bp->port.supported[1] =
8920 bp->link_params.phy[EXT_PHY1].supported;
8921 bp->port.supported[0] =
8922 bp->link_params.phy[EXT_PHY2].supported;
8923 } else {
8924 bp->port.supported[0] =
8925 bp->link_params.phy[EXT_PHY1].supported;
8926 bp->port.supported[1] =
8927 bp->link_params.phy[EXT_PHY2].supported;
8928 }
8929 cfg_size = 2;
8930 break;
8931 }
8932
8933 if (!(bp->port.supported[0] || bp->port.supported[1])) {
8934 BNX2X_ERR("NVRAM config error. BAD phy config."
8935 "PHY1 config 0x%x, PHY2 config 0x%x\n",
8936 SHMEM_RD(bp,
8937 dev_info.port_hw_config[port].external_phy_config),
8938 SHMEM_RD(bp,
8939 dev_info.port_hw_config[port].external_phy_config2));
8940 return;
8941 }
8942
8943 if (CHIP_IS_E3(bp))
8944 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
8945 else {
8946 switch (switch_cfg) {
8947 case SWITCH_CFG_1G:
8948 bp->port.phy_addr = REG_RD(
8949 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
8950 break;
8951 case SWITCH_CFG_10G:
8952 bp->port.phy_addr = REG_RD(
8953 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
8954 break;
8955 default:
8956 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8957 bp->port.link_config[0]);
8958 return;
8959 }
8960 }
8961 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8962 /* mask what we support according to speed_cap_mask per configuration */
8963 for (idx = 0; idx < cfg_size; idx++) {
8964 if (!(bp->link_params.speed_cap_mask[idx] &
8965 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8966 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
8967
8968 if (!(bp->link_params.speed_cap_mask[idx] &
8969 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8970 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
8971
8972 if (!(bp->link_params.speed_cap_mask[idx] &
8973 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8974 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
8975
8976 if (!(bp->link_params.speed_cap_mask[idx] &
8977 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8978 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
8979
8980 if (!(bp->link_params.speed_cap_mask[idx] &
8981 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8982 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
8983 SUPPORTED_1000baseT_Full);
8984
8985 if (!(bp->link_params.speed_cap_mask[idx] &
8986 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8987 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
8988
8989 if (!(bp->link_params.speed_cap_mask[idx] &
8990 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8991 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
8992
8993 }
8994
8995 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
8996 bp->port.supported[1]);
8997}
8998
8999static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9000{
9001 u32 link_config, idx, cfg_size = 0;
9002 bp->port.advertising[0] = 0;
9003 bp->port.advertising[1] = 0;
9004 switch (bp->link_params.num_phys) {
9005 case 1:
9006 case 2:
9007 cfg_size = 1;
9008 break;
9009 case 3:
9010 cfg_size = 2;
9011 break;
9012 }
9013 for (idx = 0; idx < cfg_size; idx++) {
9014 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
9015 link_config = bp->port.link_config[idx];
9016 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9017 case PORT_FEATURE_LINK_SPEED_AUTO:
9018 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
9019 bp->link_params.req_line_speed[idx] =
9020 SPEED_AUTO_NEG;
9021 bp->port.advertising[idx] |=
9022 bp->port.supported[idx];
9023 } else {
9024 /* force 10G, no AN */
9025 bp->link_params.req_line_speed[idx] =
9026 SPEED_10000;
9027 bp->port.advertising[idx] |=
9028 (ADVERTISED_10000baseT_Full |
9029 ADVERTISED_FIBRE);
9030 continue;
9031 }
9032 break;
9033
9034 case PORT_FEATURE_LINK_SPEED_10M_FULL:
9035 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
9036 bp->link_params.req_line_speed[idx] =
9037 SPEED_10;
9038 bp->port.advertising[idx] |=
9039 (ADVERTISED_10baseT_Full |
9040 ADVERTISED_TP);
9041 } else {
9042 BNX2X_ERR("NVRAM config error. "
9043 "Invalid link_config 0x%x"
9044 " speed_cap_mask 0x%x\n",
9045 link_config,
9046 bp->link_params.speed_cap_mask[idx]);
9047 return;
9048 }
9049 break;
9050
9051 case PORT_FEATURE_LINK_SPEED_10M_HALF:
9052 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
9053 bp->link_params.req_line_speed[idx] =
9054 SPEED_10;
9055 bp->link_params.req_duplex[idx] =
9056 DUPLEX_HALF;
9057 bp->port.advertising[idx] |=
9058 (ADVERTISED_10baseT_Half |
9059 ADVERTISED_TP);
9060 } else {
9061 BNX2X_ERR("NVRAM config error. "
9062 "Invalid link_config 0x%x"
9063 " speed_cap_mask 0x%x\n",
9064 link_config,
9065 bp->link_params.speed_cap_mask[idx]);
9066 return;
9067 }
9068 break;
9069
9070 case PORT_FEATURE_LINK_SPEED_100M_FULL:
9071 if (bp->port.supported[idx] &
9072 SUPPORTED_100baseT_Full) {
9073 bp->link_params.req_line_speed[idx] =
9074 SPEED_100;
9075 bp->port.advertising[idx] |=
9076 (ADVERTISED_100baseT_Full |
9077 ADVERTISED_TP);
9078 } else {
9079 BNX2X_ERR("NVRAM config error. "
9080 "Invalid link_config 0x%x"
9081 " speed_cap_mask 0x%x\n",
9082 link_config,
9083 bp->link_params.speed_cap_mask[idx]);
9084 return;
9085 }
9086 break;
9087
9088 case PORT_FEATURE_LINK_SPEED_100M_HALF:
9089 if (bp->port.supported[idx] &
9090 SUPPORTED_100baseT_Half) {
9091 bp->link_params.req_line_speed[idx] =
9092 SPEED_100;
9093 bp->link_params.req_duplex[idx] =
9094 DUPLEX_HALF;
9095 bp->port.advertising[idx] |=
9096 (ADVERTISED_100baseT_Half |
9097 ADVERTISED_TP);
9098 } else {
9099 BNX2X_ERR("NVRAM config error. "
9100 "Invalid link_config 0x%x"
9101 " speed_cap_mask 0x%x\n",
9102 link_config,
9103 bp->link_params.speed_cap_mask[idx]);
9104 return;
9105 }
9106 break;
9107
9108 case PORT_FEATURE_LINK_SPEED_1G:
9109 if (bp->port.supported[idx] &
9110 SUPPORTED_1000baseT_Full) {
9111 bp->link_params.req_line_speed[idx] =
9112 SPEED_1000;
9113 bp->port.advertising[idx] |=
9114 (ADVERTISED_1000baseT_Full |
9115 ADVERTISED_TP);
9116 } else {
9117 BNX2X_ERR("NVRAM config error. "
9118 "Invalid link_config 0x%x"
9119 " speed_cap_mask 0x%x\n",
9120 link_config,
9121 bp->link_params.speed_cap_mask[idx]);
9122 return;
9123 }
9124 break;
9125
9126 case PORT_FEATURE_LINK_SPEED_2_5G:
9127 if (bp->port.supported[idx] &
9128 SUPPORTED_2500baseX_Full) {
9129 bp->link_params.req_line_speed[idx] =
9130 SPEED_2500;
9131 bp->port.advertising[idx] |=
9132 (ADVERTISED_2500baseX_Full |
9133 ADVERTISED_TP);
9134 } else {
9135 BNX2X_ERR("NVRAM config error. "
9136 "Invalid link_config 0x%x"
9137 " speed_cap_mask 0x%x\n",
9138 link_config,
9139 bp->link_params.speed_cap_mask[idx]);
9140 return;
9141 }
9142 break;
9143
9144 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9145 if (bp->port.supported[idx] &
9146 SUPPORTED_10000baseT_Full) {
9147 bp->link_params.req_line_speed[idx] =
9148 SPEED_10000;
9149 bp->port.advertising[idx] |=
9150 (ADVERTISED_10000baseT_Full |
9151 ADVERTISED_FIBRE);
9152 } else {
9153 BNX2X_ERR("NVRAM config error. "
9154 "Invalid link_config 0x%x"
9155 " speed_cap_mask 0x%x\n",
9156 link_config,
9157 bp->link_params.speed_cap_mask[idx]);
9158 return;
9159 }
9160 break;
9161 case PORT_FEATURE_LINK_SPEED_20G:
9162 bp->link_params.req_line_speed[idx] = SPEED_20000;
9163
9164 break;
9165 default:
9166 BNX2X_ERR("NVRAM config error. "
9167 "BAD link speed link_config 0x%x\n",
9168 link_config);
9169 bp->link_params.req_line_speed[idx] =
9170 SPEED_AUTO_NEG;
9171 bp->port.advertising[idx] =
9172 bp->port.supported[idx];
9173 break;
9174 }
9175
9176 bp->link_params.req_flow_ctrl[idx] = (link_config &
9177 PORT_FEATURE_FLOW_CONTROL_MASK);
9178 if ((bp->link_params.req_flow_ctrl[idx] ==
9179 BNX2X_FLOW_CTRL_AUTO) &&
9180 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
9181 bp->link_params.req_flow_ctrl[idx] =
9182 BNX2X_FLOW_CTRL_NONE;
9183 }
9184
9185 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
9186 " 0x%x advertising 0x%x\n",
9187 bp->link_params.req_line_speed[idx],
9188 bp->link_params.req_duplex[idx],
9189 bp->link_params.req_flow_ctrl[idx],
9190 bp->port.advertising[idx]);
9191 }
9192}
9193
9194static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9195{
9196 mac_hi = cpu_to_be16(mac_hi);
9197 mac_lo = cpu_to_be32(mac_lo);
9198 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9199 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9200}
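/*
 * Worked example (illustrative): mac_hi = 0x0010 and mac_lo = 0x18a1b2c3
 * produce mac_buf[] = 00:10:18:a1:b2:c3. The cpu_to_be*() conversions
 * make the bytes land in network (big-endian) order regardless of host
 * endianness.
 */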
9201
9202static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9203{
9204 int port = BP_PORT(bp);
9205 u32 config;
9206 u32 ext_phy_type, ext_phy_config;
9207
9208 bp->link_params.bp = bp;
9209 bp->link_params.port = port;
9210
9211 bp->link_params.lane_config =
9212 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9213
9214 bp->link_params.speed_cap_mask[0] =
9215 SHMEM_RD(bp,
9216 dev_info.port_hw_config[port].speed_capability_mask);
9217 bp->link_params.speed_cap_mask[1] =
9218 SHMEM_RD(bp,
9219 dev_info.port_hw_config[port].speed_capability_mask2);
9220 bp->port.link_config[0] =
9221 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9222
9223 bp->port.link_config[1] =
9224 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
9225
9226 bp->link_params.multi_phy_config =
9227 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
9228 /* If the device is capable of WoL, set the default state according
9229 * to the HW
9230 */
9231 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9232 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9233 (config & PORT_FEATURE_WOL_ENABLED));
9234
9235 BNX2X_DEV_INFO("lane_config 0x%08x "
9236 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
9237 bp->link_params.lane_config,
9238 bp->link_params.speed_cap_mask[0],
9239 bp->port.link_config[0]);
9240
9241 bp->link_params.switch_cfg = (bp->port.link_config[0] &
9242 PORT_FEATURE_CONNECTED_SWITCH_MASK);
9243 bnx2x_phy_probe(&bp->link_params);
9244 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9245
9246 bnx2x_link_settings_requested(bp);
9247
9248 /*
9249	 * If connected directly, work with the internal PHY; otherwise, work
9250 * with the external PHY
9251 */
9252 ext_phy_config =
9253 SHMEM_RD(bp,
9254 dev_info.port_hw_config[port].external_phy_config);
9255 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
9256 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9257 bp->mdio.prtad = bp->port.phy_addr;
9258
9259 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9260 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9261 bp->mdio.prtad =
9262 XGXS_EXT_PHY_ADDR(ext_phy_config);
9263
9264 /*
9265	 * Check if a HW lock is required to access the MDC/MDIO bus to the PHY(s).
9266	 * In MF mode, it is set to cover self-test cases.
9267 */
9268 if (IS_MF(bp))
9269 bp->port.need_hw_lock = 1;
9270 else
9271 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
9272 bp->common.shmem_base,
9273 bp->common.shmem2_base);
9274}
9275
9276#ifdef BCM_CNIC
9277static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
9278{
9279 int port = BP_PORT(bp);
9280 int func = BP_ABS_FUNC(bp);
9281
9282 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
9283 drv_lic_key[port].max_iscsi_conn);
9284 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
9285 drv_lic_key[port].max_fcoe_conn);
9286
9287 /* Get the number of maximum allowed iSCSI and FCoE connections */
9288 bp->cnic_eth_dev.max_iscsi_conn =
9289 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
9290 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
9291
9292 bp->cnic_eth_dev.max_fcoe_conn =
9293 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
9294 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
9295
9296 /* Read the WWN: */
9297 if (!IS_MF(bp)) {
9298 /* Port info */
9299 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
9300 SHMEM_RD(bp,
9301 dev_info.port_hw_config[port].
9302 fcoe_wwn_port_name_upper);
9303 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
9304 SHMEM_RD(bp,
9305 dev_info.port_hw_config[port].
9306 fcoe_wwn_port_name_lower);
9307
9308 /* Node info */
9309 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
9310 SHMEM_RD(bp,
9311 dev_info.port_hw_config[port].
9312 fcoe_wwn_node_name_upper);
9313 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
9314 SHMEM_RD(bp,
9315 dev_info.port_hw_config[port].
9316 fcoe_wwn_node_name_lower);
9317 } else if (!IS_MF_SD(bp)) {
9318 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
9319
9320 /*
9321 * Read the WWN info only if the FCoE feature is enabled for
9322 * this function.
9323 */
9324 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
9325 /* Port info */
9326 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
9327 MF_CFG_RD(bp, func_ext_config[func].
9328 fcoe_wwn_port_name_upper);
9329 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
9330 MF_CFG_RD(bp, func_ext_config[func].
9331 fcoe_wwn_port_name_lower);
9332
9333 /* Node info */
9334 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
9335 MF_CFG_RD(bp, func_ext_config[func].
9336 fcoe_wwn_node_name_upper);
9337 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
9338 MF_CFG_RD(bp, func_ext_config[func].
9339 fcoe_wwn_node_name_lower);
9340 }
9341 }
9342
9343 BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
9344 bp->cnic_eth_dev.max_iscsi_conn,
9345 bp->cnic_eth_dev.max_fcoe_conn);
9346
9347 /*
9348 * If maximum allowed number of connections is zero -
9349 * disable the feature.
9350 */
9351 if (!bp->cnic_eth_dev.max_iscsi_conn)
9352 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
9353
9354 if (!bp->cnic_eth_dev.max_fcoe_conn)
9355 bp->flags |= NO_FCOE_FLAG;
9356}
9357#endif
9358
9359static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
9360{
9361 u32 val, val2;
9362 int func = BP_ABS_FUNC(bp);
9363 int port = BP_PORT(bp);
9364#ifdef BCM_CNIC
9365 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
9366 u8 *fip_mac = bp->fip_mac;
9367#endif
9368
9369 /* Zero primary MAC configuration */
9370 memset(bp->dev->dev_addr, 0, ETH_ALEN);
9371
9372 if (BP_NOMCP(bp)) {
9373 BNX2X_ERROR("warning: random MAC workaround active\n");
9374 random_ether_addr(bp->dev->dev_addr);
9375 } else if (IS_MF(bp)) {
9376 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
9377 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
9378 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9379 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
9380 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9381
9382#ifdef BCM_CNIC
9383		/* iSCSI and FCoE NPAR MACs: if either the iSCSI or the
9384		 * FCoE MAC is missing, the corresponding feature is disabled.
9385 */
9386 if (IS_MF_SI(bp)) {
9387 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
9388 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
9389 val2 = MF_CFG_RD(bp, func_ext_config[func].
9390 iscsi_mac_addr_upper);
9391 val = MF_CFG_RD(bp, func_ext_config[func].
9392 iscsi_mac_addr_lower);
9393 bnx2x_set_mac_buf(iscsi_mac, val, val2);
9394 BNX2X_DEV_INFO("Read iSCSI MAC: "
9395 BNX2X_MAC_FMT"\n",
9396 BNX2X_MAC_PRN_LIST(iscsi_mac));
9397 } else
9398 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
9399
9400 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
9401 val2 = MF_CFG_RD(bp, func_ext_config[func].
9402 fcoe_mac_addr_upper);
9403 val = MF_CFG_RD(bp, func_ext_config[func].
9404 fcoe_mac_addr_lower);
9405 bnx2x_set_mac_buf(fip_mac, val, val2);
9406 BNX2X_DEV_INFO("Read FCoE L2 MAC to "
9407 BNX2X_MAC_FMT"\n",
9408 BNX2X_MAC_PRN_LIST(fip_mac));
9409
9410 } else
9411 bp->flags |= NO_FCOE_FLAG;
9412 }
9413#endif
9414 } else {
9415 /* in SF read MACs from port configuration */
9416 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9417 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9418 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9419
9420#ifdef BCM_CNIC
9421 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
9422 iscsi_mac_upper);
9423 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
9424 iscsi_mac_lower);
9425 bnx2x_set_mac_buf(iscsi_mac, val, val2);
9426
9427 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
9428 fcoe_fip_mac_upper);
9429 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
9430 fcoe_fip_mac_lower);
9431 bnx2x_set_mac_buf(fip_mac, val, val2);
9432#endif
9433 }
9434
9435 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9436 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9437
9438#ifdef BCM_CNIC
9439 /* Set the FCoE MAC in MF_SD mode */
9440 if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp))
9441 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
9442
9443 /* Disable iSCSI if MAC configuration is
9444 * invalid.
9445 */
9446 if (!is_valid_ether_addr(iscsi_mac)) {
9447 bp->flags |= NO_ISCSI_FLAG;
9448 memset(iscsi_mac, 0, ETH_ALEN);
9449 }
9450
9451 /* Disable FCoE if MAC configuration is
9452 * invalid.
9453 */
9454 if (!is_valid_ether_addr(fip_mac)) {
9455 bp->flags |= NO_FCOE_FLAG;
9456 memset(bp->fip_mac, 0, ETH_ALEN);
9457 }
9458#endif
9459
9460 if (!is_valid_ether_addr(bp->dev->dev_addr))
9461 dev_err(&bp->pdev->dev,
9462 "bad Ethernet MAC address configuration: "
9463 BNX2X_MAC_FMT", change it manually before bringing up "
9464 "the appropriate network interface\n",
9465 BNX2X_MAC_PRN_LIST(bp->dev->dev_addr));
9466}
9467
9468static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9469{
9470 int /*abs*/func = BP_ABS_FUNC(bp);
9471 int vn;
9472 u32 val = 0;
9473 int rc = 0;
9474
9475 bnx2x_get_common_hwinfo(bp);
9476
9477 /*
9478 * initialize IGU parameters
9479 */
9480 if (CHIP_IS_E1x(bp)) {
9481 bp->common.int_block = INT_BLOCK_HC;
9482
9483 bp->igu_dsb_id = DEF_SB_IGU_ID;
9484 bp->igu_base_sb = 0;
9485 } else {
9486 bp->common.int_block = INT_BLOCK_IGU;
9487
9488		/* do not allow device reset during IGU info processing */
9489 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
9490
9491 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9492
9493 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
9494 int tout = 5000;
9495
9496 BNX2X_DEV_INFO("FORCING Normal Mode\n");
9497
9498 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
9499 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
9500 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
9501
9502 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
9503 tout--;
9504 usleep_range(1000, 1000);
9505 }
9506
9507 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
9508 dev_err(&bp->pdev->dev,
9509 "FORCING Normal Mode failed!!!\n");
9510 return -EPERM;
9511 }
9512 }
9513
9514 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
9515 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
9516 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
9517 } else
9518 BNX2X_DEV_INFO("IGU Normal Mode\n");
9519
9520 bnx2x_get_igu_cam_info(bp);
9521
9522 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
9523 }
9524
9525 /*
9526	 * Set the base FW non-default (fast path) status block ID. This value is
9527 * used to initialize the fw_sb_id saved on the fp/queue structure to
9528 * determine the id used by the FW.
9529 */
9530 if (CHIP_IS_E1x(bp))
9531 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
9532 else /*
9533 * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
9534 * the same queue are indicated on the same IGU SB). So we prefer
9535 * FW and IGU SBs to be the same value.
9536 */
9537 bp->base_fw_ndsb = bp->igu_base_sb;
9538
9539 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
9540 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
9541 bp->igu_sb_cnt, bp->base_fw_ndsb);
9542
9543 /*
9544 * Initialize MF configuration
9545 */
9546
9547 bp->mf_ov = 0;
9548 bp->mf_mode = 0;
9549 vn = BP_VN(bp);
9550
9551 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
9552 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
9553 bp->common.shmem2_base, SHMEM2_RD(bp, size),
9554 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
9555
9556 if (SHMEM2_HAS(bp, mf_cfg_addr))
9557 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
9558 else
9559 bp->common.mf_cfg_base = bp->common.shmem_base +
9560 offsetof(struct shmem_region, func_mb) +
9561 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
9562 /*
9563 * get mf configuration:
9564 * 1. existence of MF configuration
9565 * 2. MAC address must be legal (check only upper bytes)
9566 * for Switch-Independent mode;
9567 * OVLAN must be legal for Switch-Dependent mode
9568 * 3. SF_MODE configures specific MF mode
9569 */
9570 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
9571 /* get mf configuration */
9572 val = SHMEM_RD(bp,
9573 dev_info.shared_feature_config.config);
9574 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
9575
9576 switch (val) {
9577 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
9578 val = MF_CFG_RD(bp, func_mf_config[func].
9579 mac_upper);
9580 /* check for legal mac (upper bytes)*/
9581 if (val != 0xffff) {
9582 bp->mf_mode = MULTI_FUNCTION_SI;
9583 bp->mf_config[vn] = MF_CFG_RD(bp,
9584 func_mf_config[func].config);
9585 } else
9586 BNX2X_DEV_INFO("illegal MAC address "
9587 "for SI\n");
9588 break;
9589 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
9590 /* get OV configuration */
9591 val = MF_CFG_RD(bp,
9592 func_mf_config[FUNC_0].e1hov_tag);
9593 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
9594
9595 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9596 bp->mf_mode = MULTI_FUNCTION_SD;
9597 bp->mf_config[vn] = MF_CFG_RD(bp,
9598 func_mf_config[func].config);
9599 } else
9600 BNX2X_DEV_INFO("illegal OV for SD\n");
9601 break;
9602 default:
9603 /* Unknown configuration: reset mf_config */
9604 bp->mf_config[vn] = 0;
9605 BNX2X_DEV_INFO("unkown MF mode 0x%x\n", val);
9606 }
9607 }
9608
9609 BNX2X_DEV_INFO("%s function mode\n",
9610 IS_MF(bp) ? "multi" : "single");
9611
9612 switch (bp->mf_mode) {
9613 case MULTI_FUNCTION_SD:
9614 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
9615 FUNC_MF_CFG_E1HOV_TAG_MASK;
9616 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9617 bp->mf_ov = val;
9618 bp->path_has_ovlan = true;
9619
9620 BNX2X_DEV_INFO("MF OV for func %d is %d "
9621 "(0x%04x)\n", func, bp->mf_ov,
9622 bp->mf_ov);
9623 } else {
9624 dev_err(&bp->pdev->dev,
9625 "No valid MF OV for func %d, "
9626 "aborting\n", func);
9627 return -EPERM;
9628 }
9629 break;
9630 case MULTI_FUNCTION_SI:
9631 BNX2X_DEV_INFO("func %d is in MF "
9632 "switch-independent mode\n", func);
9633 break;
9634 default:
9635 if (vn) {
9636 dev_err(&bp->pdev->dev,
9637 "VN %d is in a single function mode, "
9638 "aborting\n", vn);
9639 return -EPERM;
9640 }
9641 break;
9642 }
9643
9644		/* Check if the other port on the path needs ovlan:
9645		 * since the MF configuration is shared between ports,
9646		 * the only possible mixed modes are
9647		 * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
9648 */
9649 if (CHIP_MODE_IS_4_PORT(bp) &&
9650 !bp->path_has_ovlan &&
9651 !IS_MF(bp) &&
9652 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
9653 u8 other_port = !BP_PORT(bp);
9654 u8 other_func = BP_PATH(bp) + 2*other_port;
9655 val = MF_CFG_RD(bp,
9656 func_mf_config[other_func].e1hov_tag);
9657 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9658 bp->path_has_ovlan = true;
9659 }
9660 }
9661
9662 /* adjust igu_sb_cnt to MF for E1x */
9663 if (CHIP_IS_E1x(bp) && IS_MF(bp))
9664 bp->igu_sb_cnt /= E1HVN_MAX;
9665
9666 /* port info */
9667 bnx2x_get_port_hwinfo(bp);
9668
9669 /* Get MAC addresses */
9670 bnx2x_get_mac_hwinfo(bp);
9671
9672#ifdef BCM_CNIC
9673 bnx2x_get_cnic_info(bp);
9674#endif
9675
9676 /* Get current FW pulse sequence */
9677 if (!BP_NOMCP(bp)) {
9678 int mb_idx = BP_FW_MB_IDX(bp);
9679
9680 bp->fw_drv_pulse_wr_seq =
9681 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
9682 DRV_PULSE_SEQ_MASK);
9683 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
9684 }
9685
9686 return rc;
9687}
9688
9689static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9690{
9691 int cnt, i, block_end, rodi;
9692 char vpd_data[BNX2X_VPD_LEN+1];
9693 char str_id_reg[VENDOR_ID_LEN+1];
9694 char str_id_cap[VENDOR_ID_LEN+1];
9695 u8 len;
9696
9697 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9698 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9699
9700 if (cnt < BNX2X_VPD_LEN)
9701 goto out_not_found;
9702
9703 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9704 PCI_VPD_LRDT_RO_DATA);
9705 if (i < 0)
9706 goto out_not_found;
9707
9708
9709 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9710 pci_vpd_lrdt_size(&vpd_data[i]);
9711
9712 i += PCI_VPD_LRDT_TAG_SIZE;
9713
9714 if (block_end > BNX2X_VPD_LEN)
9715 goto out_not_found;
9716
9717 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9718 PCI_VPD_RO_KEYWORD_MFR_ID);
9719 if (rodi < 0)
9720 goto out_not_found;
9721
9722 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9723
9724 if (len != VENDOR_ID_LEN)
9725 goto out_not_found;
9726
9727 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9728
9729 /* vendor specific info */
9730 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9731 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9732 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9733 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9734
9735 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9736 PCI_VPD_RO_KEYWORD_VENDOR0);
9737 if (rodi >= 0) {
9738 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9739
9740 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9741
9742 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9743 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9744 bp->fw_ver[len] = ' ';
9745 }
9746 }
9747 return;
9748 }
9749out_not_found:
9750 return;
9751}
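/*
 * VPD layout walked by bnx2x_read_fwinfo() above (a sketch per the PCI
 * VPD spec; field sizes omitted):
 *
 *   [large-resource tag: RO data][length]
 *       [keyword "MN"][len][manufacturer ID]    <- PCI_VPD_RO_KEYWORD_MFR_ID
 *       [keyword "V0"][len][vendor FW version]  <- PCI_VPD_RO_KEYWORD_VENDOR0
 *
 * The V0 string is copied into bp->fw_ver only when the MN field matches
 * the Dell vendor ID checked above.
 */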
9752
9753static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
9754{
9755 u32 flags = 0;
9756
9757 if (CHIP_REV_IS_FPGA(bp))
9758 SET_FLAGS(flags, MODE_FPGA);
9759 else if (CHIP_REV_IS_EMUL(bp))
9760 SET_FLAGS(flags, MODE_EMUL);
9761 else
9762 SET_FLAGS(flags, MODE_ASIC);
9763
9764 if (CHIP_MODE_IS_4_PORT(bp))
9765 SET_FLAGS(flags, MODE_PORT4);
9766 else
9767 SET_FLAGS(flags, MODE_PORT2);
9768
9769 if (CHIP_IS_E2(bp))
9770 SET_FLAGS(flags, MODE_E2);
9771 else if (CHIP_IS_E3(bp)) {
9772 SET_FLAGS(flags, MODE_E3);
9773 if (CHIP_REV(bp) == CHIP_REV_Ax)
9774 SET_FLAGS(flags, MODE_E3_A0);
9775 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
9776 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
9777 }
9778
9779 if (IS_MF(bp)) {
9780 SET_FLAGS(flags, MODE_MF);
9781 switch (bp->mf_mode) {
9782 case MULTI_FUNCTION_SD:
9783 SET_FLAGS(flags, MODE_MF_SD);
9784 break;
9785 case MULTI_FUNCTION_SI:
9786 SET_FLAGS(flags, MODE_MF_SI);
9787 break;
9788 }
9789 } else
9790 SET_FLAGS(flags, MODE_SF);
9791
9792#if defined(__LITTLE_ENDIAN)
9793 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
9794#else /*(__BIG_ENDIAN)*/
9795 SET_FLAGS(flags, MODE_BIG_ENDIAN);
9796#endif
9797 INIT_MODE_FLAGS(bp) = flags;
9798}
9799
9800static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9801{
9802 int func;
9803 int timer_interval;
9804 int rc;
9805
9806 mutex_init(&bp->port.phy_mutex);
9807 mutex_init(&bp->fw_mb_mutex);
9808 spin_lock_init(&bp->stats_lock);
9809#ifdef BCM_CNIC
9810 mutex_init(&bp->cnic_mutex);
9811#endif
9812
9813 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9814 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
9815 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
9816 rc = bnx2x_get_hwinfo(bp);
9817 if (rc)
9818 return rc;
9819
9820 bnx2x_set_modes_bitmap(bp);
9821
9822 rc = bnx2x_alloc_mem_bp(bp);
9823 if (rc)
9824 return rc;
9825
9826 bnx2x_read_fwinfo(bp);
9827
9828 func = BP_FUNC(bp);
9829
9830 /* need to reset chip if undi was active */
9831 if (!BP_NOMCP(bp))
9832 bnx2x_undi_unload(bp);
9833
9834 /* init fw_seq after undi_unload! */
9835 if (!BP_NOMCP(bp)) {
9836 bp->fw_seq =
9837 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9838 DRV_MSG_SEQ_NUMBER_MASK);
9839 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9840 }
9841
9842 if (CHIP_REV_IS_FPGA(bp))
9843 dev_err(&bp->pdev->dev, "FPGA detected\n");
9844
9845 if (BP_NOMCP(bp) && (func == 0))
9846 dev_err(&bp->pdev->dev, "MCP disabled, "
9847 "must load devices in order!\n");
9848
9849 bp->multi_mode = multi_mode;
9850
9851 /* Set TPA flags */
9852 if (disable_tpa) {
9853 bp->flags &= ~TPA_ENABLE_FLAG;
9854 bp->dev->features &= ~NETIF_F_LRO;
9855 } else {
9856 bp->flags |= TPA_ENABLE_FLAG;
9857 bp->dev->features |= NETIF_F_LRO;
9858 }
9859 bp->disable_tpa = disable_tpa;
9860
9861 if (CHIP_IS_E1(bp))
9862 bp->dropless_fc = 0;
9863 else
9864 bp->dropless_fc = dropless_fc;
9865
9866 bp->mrrs = mrrs;
9867
9868 bp->tx_ring_size = MAX_TX_AVAIL;
9869
9870 /* make sure that the numbers are in the right granularity */
9871 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
9872 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
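	/*
	 * e.g. assuming BNX2X_BTR is 4, the expressions above round
	 * 50 -> 48 and 25 -> 24, so both tick values end up as multiples
	 * of the timer resolution.
	 */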
9873
9874 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9875 bp->current_interval = (poll ? poll : timer_interval);
9876
9877 init_timer(&bp->timer);
9878 bp->timer.expires = jiffies + bp->current_interval;
9879 bp->timer.data = (unsigned long) bp;
9880 bp->timer.function = bnx2x_timer;
9881
9882 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
9883 bnx2x_dcbx_init_params(bp);
9884
9885#ifdef BCM_CNIC
9886 if (CHIP_IS_E1x(bp))
9887 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
9888 else
9889 bp->cnic_base_cl_id = FP_SB_MAX_E2;
9890#endif
9891
9892 /* multiple tx priority */
9893 if (CHIP_IS_E1x(bp))
9894 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
9895 if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
9896 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
9897 if (CHIP_IS_E3B0(bp))
9898 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
9899
9900 return rc;
9901}
9902
9903
9904/****************************************************************************
9905* General service functions
9906****************************************************************************/
9907
9908/*
9909 * net_device service functions
9910 */
9911
9912/* called with rtnl_lock */
9913static int bnx2x_open(struct net_device *dev)
9914{
9915 struct bnx2x *bp = netdev_priv(dev);
9916 bool global = false;
9917 int other_engine = BP_PATH(bp) ? 0 : 1;
9918 u32 other_load_counter, load_counter;
9919
9920 netif_carrier_off(dev);
9921
9922 bnx2x_set_power_state(bp, PCI_D0);
9923
9924 other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
9925 load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
9926
9927 /*
9928	 * If a parity error occurred during the unload, then attentions
9929	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
9930 * want the first function loaded on the current engine to
9931 * complete the recovery.
9932 */
9933 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
9934 bnx2x_chk_parity_attn(bp, &global, true))
9935 do {
9936 /*
9937			 * If there are attentions and they are in global
9938			 * blocks, set the GLOBAL_RESET bit regardless of whether
9939 * it will be this function that will complete the
9940 * recovery or not.
9941 */
9942 if (global)
9943 bnx2x_set_reset_global(bp);
9944
9945 /*
9946 * Only the first function on the current engine should
9947 * try to recover in open. In case of attentions in
9948 * global blocks only the first in the chip should try
9949 * to recover.
9950 */
9951 if ((!load_counter &&
9952 (!global || !other_load_counter)) &&
9953 bnx2x_trylock_leader_lock(bp) &&
9954 !bnx2x_leader_reset(bp)) {
9955 netdev_info(bp->dev, "Recovered in open\n");
9956 break;
9957 }
9958
9959 /* recovery has failed... */
9960 bnx2x_set_power_state(bp, PCI_D3hot);
9961 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9962
9963 netdev_err(bp->dev, "Recovery flow hasn't been properly"
9964 " completed yet. Try again later. If u still see this"
9965 " message after a few retries then power cycle is"
9966 " required.\n");
9967
9968 return -EAGAIN;
9969 } while (0);
9970
9971 bp->recovery_state = BNX2X_RECOVERY_DONE;
9972 return bnx2x_nic_load(bp, LOAD_OPEN);
9973}
9974
9975/* called with rtnl_lock */
9976static int bnx2x_close(struct net_device *dev)
9977{
9978 struct bnx2x *bp = netdev_priv(dev);
9979
9980 /* Unload the driver, release IRQs */
9981 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9982
9983 /* Power off */
9984 bnx2x_set_power_state(bp, PCI_D3hot);
9985
9986 return 0;
9987}
9988
9989static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
9990 struct bnx2x_mcast_ramrod_params *p)
9991{
9992 int mc_count = netdev_mc_count(bp->dev);
9993 struct bnx2x_mcast_list_elem *mc_mac =
9994 kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
9995 struct netdev_hw_addr *ha;
9996
9997 if (!mc_mac)
9998 return -ENOMEM;
9999
10000 INIT_LIST_HEAD(&p->mcast_list);
10001
10002 netdev_for_each_mc_addr(ha, bp->dev) {
10003 mc_mac->mac = bnx2x_mc_addr(ha);
10004 list_add_tail(&mc_mac->link, &p->mcast_list);
10005 mc_mac++;
10006 }
10007
10008 p->mcast_list_len = mc_count;
10009
10010 return 0;
10011}
10012
10013static inline void bnx2x_free_mcast_macs_list(
10014 struct bnx2x_mcast_ramrod_params *p)
10015{
10016 struct bnx2x_mcast_list_elem *mc_mac =
10017 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
10018 link);
10019
10020 WARN_ON(!mc_mac);
10021 kfree(mc_mac);
10022}
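/*
 * Note on the alloc/free pairing above: bnx2x_init_mcast_macs_list()
 * allocates all mc_count elements with a single kzalloc() and links them
 * into p->mcast_list in order, so the first list entry is also the base
 * of the allocation. That is why a single kfree() on list_first_entry()
 * in bnx2x_free_mcast_macs_list() releases the whole array.
 */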
10023
10024/**
10025 * bnx2x_set_uc_list - configure a new unicast MACs list.
10026 *
10027 * @bp: driver handle
10028 *
10029 * We will use zero (0) as a MAC type for these MACs.
10030 */
10031static inline int bnx2x_set_uc_list(struct bnx2x *bp)
10032{
10033 int rc;
10034 struct net_device *dev = bp->dev;
10035 struct netdev_hw_addr *ha;
10036 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
10037 unsigned long ramrod_flags = 0;
10038
10039	/* First schedule a cleanup of the old configuration */
10040 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
10041 if (rc < 0) {
10042 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
10043 return rc;
10044 }
10045
10046 netdev_for_each_uc_addr(ha, dev) {
10047 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
10048 BNX2X_UC_LIST_MAC, &ramrod_flags);
10049 if (rc < 0) {
10050 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
10051 rc);
10052 return rc;
10053 }
10054 }
10055
10056 /* Execute the pending commands */
10057 __set_bit(RAMROD_CONT, &ramrod_flags);
10058 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
10059 BNX2X_UC_LIST_MAC, &ramrod_flags);
10060}
10061
10062static inline int bnx2x_set_mc_list(struct bnx2x *bp)
10063{
10064 struct net_device *dev = bp->dev;
10065 struct bnx2x_mcast_ramrod_params rparam = {0};
10066 int rc = 0;
10067
10068 rparam.mcast_obj = &bp->mcast_obj;
10069
10070 /* first, clear all configured multicast MACs */
10071 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
10072 if (rc < 0) {
10073 BNX2X_ERR("Failed to clear multicast "
10074 "configuration: %d\n", rc);
10075 return rc;
10076 }
10077
10078	/* then, configure the new MAC list */
10079 if (netdev_mc_count(dev)) {
10080 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
10081 if (rc) {
10082 BNX2X_ERR("Failed to create multicast MACs "
10083 "list: %d\n", rc);
10084 return rc;
10085 }
10086
10087 /* Now add the new MACs */
10088 rc = bnx2x_config_mcast(bp, &rparam,
10089 BNX2X_MCAST_CMD_ADD);
10090 if (rc < 0)
10091 BNX2X_ERR("Failed to set a new multicast "
10092 "configuration: %d\n", rc);
10093
10094 bnx2x_free_mcast_macs_list(&rparam);
10095 }
10096
10097 return rc;
10098}
10099
10100
10101/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
10102void bnx2x_set_rx_mode(struct net_device *dev)
10103{
10104 struct bnx2x *bp = netdev_priv(dev);
10105 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10106
10107 if (bp->state != BNX2X_STATE_OPEN) {
10108 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10109 return;
10110 }
10111
10112 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
10113
10114 if (dev->flags & IFF_PROMISC)
10115 rx_mode = BNX2X_RX_MODE_PROMISC;
10116 else if ((dev->flags & IFF_ALLMULTI) ||
10117 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
10118 CHIP_IS_E1(bp)))
10119 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10120 else {
10121 /* some multicasts */
10122 if (bnx2x_set_mc_list(bp) < 0)
10123 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10124
10125 if (bnx2x_set_uc_list(bp) < 0)
10126 rx_mode = BNX2X_RX_MODE_PROMISC;
10127 }
10128
10129 bp->rx_mode = rx_mode;
10130
10131 /* Schedule the rx_mode command */
10132 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
10133 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
10134 return;
10135 }
10136
10137 bnx2x_set_storm_rx_mode(bp);
10138}
10139
10140/* called with rtnl_lock */
10141static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
10142 int devad, u16 addr)
10143{
10144 struct bnx2x *bp = netdev_priv(netdev);
10145 u16 value;
10146 int rc;
10147
10148 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
10149 prtad, devad, addr);
10150
10151 /* The HW expects different devad if CL22 is used */
10152 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
10153
10154 bnx2x_acquire_phy_lock(bp);
10155 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
10156 bnx2x_release_phy_lock(bp);
10157 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
10158
10159 if (!rc)
10160 rc = value;
10161 return rc;
10162}
10163
10164/* called with rtnl_lock */
10165static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
10166 u16 addr, u16 value)
10167{
10168 struct bnx2x *bp = netdev_priv(netdev);
10169 int rc;
10170
10171 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
10172 " value 0x%x\n", prtad, devad, addr, value);
10173
10174 /* The HW expects different devad if CL22 is used */
10175 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
10176
10177 bnx2x_acquire_phy_lock(bp);
10178 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
10179 bnx2x_release_phy_lock(bp);
10180 return rc;
10181}
10182
10183/* called with rtnl_lock */
10184static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10185{
10186 struct bnx2x *bp = netdev_priv(dev);
10187 struct mii_ioctl_data *mdio = if_mii(ifr);
10188
10189 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
10190 mdio->phy_id, mdio->reg_num, mdio->val_in);
10191
10192 if (!netif_running(dev))
10193 return -EAGAIN;
10194
10195 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
10196}
10197
10198#ifdef CONFIG_NET_POLL_CONTROLLER
10199static void poll_bnx2x(struct net_device *dev)
10200{
10201 struct bnx2x *bp = netdev_priv(dev);
10202
10203 disable_irq(bp->pdev->irq);
10204 bnx2x_interrupt(bp->pdev->irq, dev);
10205 enable_irq(bp->pdev->irq);
10206}
10207#endif
10208
10209static const struct net_device_ops bnx2x_netdev_ops = {
10210 .ndo_open = bnx2x_open,
10211 .ndo_stop = bnx2x_close,
10212 .ndo_start_xmit = bnx2x_start_xmit,
10213 .ndo_select_queue = bnx2x_select_queue,
10214 .ndo_set_rx_mode = bnx2x_set_rx_mode,
10215 .ndo_set_mac_address = bnx2x_change_mac_addr,
10216 .ndo_validate_addr = eth_validate_addr,
10217 .ndo_do_ioctl = bnx2x_ioctl,
10218 .ndo_change_mtu = bnx2x_change_mtu,
10219 .ndo_fix_features = bnx2x_fix_features,
10220 .ndo_set_features = bnx2x_set_features,
10221 .ndo_tx_timeout = bnx2x_tx_timeout,
10222#ifdef CONFIG_NET_POLL_CONTROLLER
10223 .ndo_poll_controller = poll_bnx2x,
10224#endif
10225 .ndo_setup_tc = bnx2x_setup_tc,
10226
10227#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
10228 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
10229#endif
10230};
10231
10232static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
10233{
10234 struct device *dev = &bp->pdev->dev;
10235
10236 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
10237 bp->flags |= USING_DAC_FLAG;
10238 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
10239 dev_err(dev, "dma_set_coherent_mask failed, "
10240 "aborting\n");
10241 return -EIO;
10242 }
10243 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
10244 dev_err(dev, "System does not support DMA, aborting\n");
10245 return -EIO;
10246 }
10247
10248 return 0;
10249}
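/*
 * A quick sketch of the fallback above: if the device accepts a 64-bit
 * streaming mask, USING_DAC_FLAG is recorded and bnx2x_init_dev() later
 * adds NETIF_F_HIGHDMA to dev->features; otherwise the driver quietly
 * falls back to 32-bit DMA addressing, and only a failure of both masks
 * aborts the probe with -EIO.
 */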
10250
10251static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10252 struct net_device *dev,
10253 unsigned long board_type)
10254{
10255 struct bnx2x *bp;
10256 int rc;
10257
10258 SET_NETDEV_DEV(dev, &pdev->dev);
10259 bp = netdev_priv(dev);
10260
10261 bp->dev = dev;
10262 bp->pdev = pdev;
10263 bp->flags = 0;
10264 bp->pf_num = PCI_FUNC(pdev->devfn);
10265
10266 rc = pci_enable_device(pdev);
10267 if (rc) {
10268 dev_err(&bp->pdev->dev,
10269 "Cannot enable PCI device, aborting\n");
10270 goto err_out;
10271 }
10272
10273 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10274 dev_err(&bp->pdev->dev,
10275 "Cannot find PCI device base address, aborting\n");
10276 rc = -ENODEV;
10277 goto err_out_disable;
10278 }
10279
10280 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10281 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
10282 " base address, aborting\n");
10283 rc = -ENODEV;
10284 goto err_out_disable;
10285 }
10286
10287 if (atomic_read(&pdev->enable_cnt) == 1) {
10288 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10289 if (rc) {
10290 dev_err(&bp->pdev->dev,
10291 "Cannot obtain PCI resources, aborting\n");
10292 goto err_out_disable;
10293 }
10294
10295 pci_set_master(pdev);
10296 pci_save_state(pdev);
10297 }
10298
10299 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10300 if (bp->pm_cap == 0) {
10301 dev_err(&bp->pdev->dev,
10302 "Cannot find power management capability, aborting\n");
10303 rc = -EIO;
10304 goto err_out_release;
10305 }
10306
10307 if (!pci_is_pcie(pdev)) {
10308 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
10309 rc = -EIO;
10310 goto err_out_release;
10311 }
10312
10313 rc = bnx2x_set_coherency_mask(bp);
10314 if (rc)
10315 goto err_out_release;
10316
10317 dev->mem_start = pci_resource_start(pdev, 0);
10318 dev->base_addr = dev->mem_start;
10319 dev->mem_end = pci_resource_end(pdev, 0);
10320
10321 dev->irq = pdev->irq;
10322
10323 bp->regview = pci_ioremap_bar(pdev, 0);
10324 if (!bp->regview) {
10325 dev_err(&bp->pdev->dev,
10326 "Cannot map register space, aborting\n");
10327 rc = -ENOMEM;
10328 goto err_out_release;
10329 }
10330
10331 bnx2x_set_power_state(bp, PCI_D0);
10332
10333 /* clean indirect addresses */
10334 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10335 PCICFG_VENDOR_ID_OFFSET);
10336 /*
10337	 * Clean the following indirect addresses for all functions since they
10338	 * are not used by the driver.
10339 */
10340 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
10341 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
10342 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
10343 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
10344
10345 if (CHIP_IS_E1x(bp)) {
10346 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
10347 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
10348 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
10349 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
10350 }
10351
10352 /*
10353 * Enable internal target-read (in case we are probed after PF FLR).
10354 * Must be done prior to any BAR read access. Only for 57712 and up
10355 */
10356 if (board_type != BCM57710 &&
10357 board_type != BCM57711 &&
10358 board_type != BCM57711E)
10359 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
10360
10361 /* Reset the load counter */
10362 bnx2x_clear_load_cnt(bp);
10363
10364 dev->watchdog_timeo = TX_TIMEOUT;
10365
10366 dev->netdev_ops = &bnx2x_netdev_ops;
10367 bnx2x_set_ethtool_ops(dev);
10368
10369 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
10370 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
10371 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;
10372
10373 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
10374 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
10375
10376 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
10377 if (bp->flags & USING_DAC_FLAG)
10378 dev->features |= NETIF_F_HIGHDMA;
10379
10380 /* Add Loopback capability to the device */
10381 dev->hw_features |= NETIF_F_LOOPBACK;
10382
10383#ifdef BCM_DCBNL
10384 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
10385#endif
10386
10387 /* get_port_hwinfo() will set prtad and mmds properly */
10388 bp->mdio.prtad = MDIO_PRTAD_NONE;
10389 bp->mdio.mmds = 0;
10390 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
10391 bp->mdio.dev = dev;
10392 bp->mdio.mdio_read = bnx2x_mdio_read;
10393 bp->mdio.mdio_write = bnx2x_mdio_write;
10394
10395 return 0;
10396
10397err_out_release:
10398 if (atomic_read(&pdev->enable_cnt) == 1)
10399 pci_release_regions(pdev);
10400
10401err_out_disable:
10402 pci_disable_device(pdev);
10403 pci_set_drvdata(pdev, NULL);
10404
10405err_out:
10406 return rc;
10407}
10408
10409static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
10410 int *width, int *speed)
10411{
10412 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10413
10414 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10415
10416	/* returned speed value: 1 = 2.5GHz; 2 = 5GHz */
10417 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10418}
10419
10420static int bnx2x_check_firmware(struct bnx2x *bp)
10421{
10422 const struct firmware *firmware = bp->firmware;
10423 struct bnx2x_fw_file_hdr *fw_hdr;
10424 struct bnx2x_fw_file_section *sections;
10425 u32 offset, len, num_ops;
10426 u16 *ops_offsets;
10427 int i;
10428 const u8 *fw_ver;
10429
10430 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
10431 return -EINVAL;
10432
10433 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
10434 sections = (struct bnx2x_fw_file_section *)fw_hdr;
10435
10436 /* Make sure none of the offsets and sizes make us read beyond
10437 * the end of the firmware data */
10438 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
10439 offset = be32_to_cpu(sections[i].offset);
10440 len = be32_to_cpu(sections[i].len);
10441 if (offset + len > firmware->size) {
10442 dev_err(&bp->pdev->dev,
10443 "Section %d length is out of bounds\n", i);
10444 return -EINVAL;
10445 }
10446 }
10447
10448 /* Likewise for the init_ops offsets */
10449 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
10450 ops_offsets = (u16 *)(firmware->data + offset);
10451 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
10452
10453 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
10454 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
10455 dev_err(&bp->pdev->dev,
10456 "Section offset %d is out of bounds\n", i);
10457 return -EINVAL;
10458 }
10459 }
10460
10461 /* Check FW version */
10462 offset = be32_to_cpu(fw_hdr->fw_version.offset);
10463 fw_ver = firmware->data + offset;
10464 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
10465 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
10466 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
10467 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
10468 dev_err(&bp->pdev->dev,
10469 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
10470 fw_ver[0], fw_ver[1], fw_ver[2],
10471 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
10472 BCM_5710_FW_MINOR_VERSION,
10473 BCM_5710_FW_REVISION_VERSION,
10474 BCM_5710_FW_ENGINEERING_VERSION);
10475 return -EINVAL;
10476 }
10477
10478 return 0;
10479}
10480
10481static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
10482{
10483 const __be32 *source = (const __be32 *)_source;
10484 u32 *target = (u32 *)_target;
10485 u32 i;
10486
10487 for (i = 0; i < n/4; i++)
10488 target[i] = be32_to_cpu(source[i]);
10489}
10490
10491/*
10492 Ops array is stored in the following format:
10493 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
10494 */
10495static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
10496{
10497 const __be32 *source = (const __be32 *)_source;
10498 struct raw_op *target = (struct raw_op *)_target;
10499 u32 i, j, tmp;
10500
10501 for (i = 0, j = 0; i < n/8; i++, j += 2) {
10502 tmp = be32_to_cpu(source[j]);
10503 target[i].op = (tmp >> 24) & 0xff;
10504 target[i].offset = tmp & 0xffffff;
10505 target[i].raw_data = be32_to_cpu(source[j + 1]);
10506 }
10507}
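/*
 * Worked example (illustrative bytes, not from a real firmware image):
 * the 8-byte big-endian record 10 00 12 34 00 00 00 05 decodes to
 * op = 0x10, offset = 0x001234 and raw_data = 0x00000005 - the first
 * be32 word packs {op(8bit), offset(24bit)}, the second carries the
 * raw data.
 */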
10508
10509/**
10510 * IRO array is stored in the following format:
10511 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
10512 */
10513static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
10514{
10515 const __be32 *source = (const __be32 *)_source;
10516 struct iro *target = (struct iro *)_target;
10517 u32 i, j, tmp;
10518
10519 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
10520 target[i].base = be32_to_cpu(source[j]);
10521 j++;
10522 tmp = be32_to_cpu(source[j]);
10523 target[i].m1 = (tmp >> 16) & 0xffff;
10524 target[i].m2 = tmp & 0xffff;
10525 j++;
10526 tmp = be32_to_cpu(source[j]);
10527 target[i].m3 = (tmp >> 16) & 0xffff;
10528 target[i].size = tmp & 0xffff;
10529 j++;
10530 }
10531}
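/*
 * Worked example (illustrative values): the three consecutive be32
 * words 0x00002000, 0x00400010 and 0x00080038 decode to a single IRO
 * entry with base = 0x2000, m1 = 0x0040, m2 = 0x0010, m3 = 0x0008 and
 * size = 0x0038; each entry therefore consumes 12 bytes of the blob
 * even though base is documented as a 24-bit field.
 */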
10532
10533static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
10534{
10535 const __be16 *source = (const __be16 *)_source;
10536 u16 *target = (u16 *)_target;
10537 u32 i;
10538
10539 for (i = 0; i < n/2; i++)
10540 target[i] = be16_to_cpu(source[i]);
10541}
10542
10543#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
10544do { \
10545 u32 len = be32_to_cpu(fw_hdr->arr.len); \
10546 bp->arr = kmalloc(len, GFP_KERNEL); \
10547 if (!bp->arr) { \
10548 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
10549 goto lbl; \
10550 } \
10551 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
10552 (u8 *)bp->arr, len); \
10553} while (0)
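/*
 * For reference, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) expands to roughly the following (sketch; the error
 * print is omitted here):
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data)
 *		goto request_firmware_exit;
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 *
 * i.e. it allocates a native-endian copy of one firmware section and
 * converts it with the supplied helper.
 */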
10554
10555int bnx2x_init_firmware(struct bnx2x *bp)
10556{
10557 const char *fw_file_name;
10558 struct bnx2x_fw_file_hdr *fw_hdr;
10559 int rc;
10560
10561 if (CHIP_IS_E1(bp))
10562 fw_file_name = FW_FILE_NAME_E1;
10563 else if (CHIP_IS_E1H(bp))
10564 fw_file_name = FW_FILE_NAME_E1H;
10565 else if (!CHIP_IS_E1x(bp))
10566 fw_file_name = FW_FILE_NAME_E2;
10567 else {
10568 BNX2X_ERR("Unsupported chip revision\n");
10569 return -EINVAL;
10570 }
10571
10572 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
10573
10574 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
10575 if (rc) {
10576 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
10577 goto request_firmware_exit;
10578 }
10579
10580 rc = bnx2x_check_firmware(bp);
10581 if (rc) {
10582 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
10583 goto request_firmware_exit;
10584 }
10585
10586 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
10587
10588 /* Initialize the pointers to the init arrays */
10589 /* Blob */
10590 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
10591
10592 /* Opcodes */
10593 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
10594
10595 /* Offsets */
10596 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
10597 be16_to_cpu_n);
10598
10599 /* STORMs firmware */
10600 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
10601 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
10602 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
10603 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
10604 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
10605 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
10606 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
10607 be32_to_cpu(fw_hdr->usem_pram_data.offset);
10608 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
10609 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
10610 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
10611 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
10612 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
10613 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
10614 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
10615 be32_to_cpu(fw_hdr->csem_pram_data.offset);
10616 /* IRO */
10617 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
10618
10619 return 0;
10620
10621iro_alloc_err:
10622 kfree(bp->init_ops_offsets);
10623init_offsets_alloc_err:
10624 kfree(bp->init_ops);
10625init_ops_alloc_err:
10626 kfree(bp->init_data);
10627request_firmware_exit:
10628 release_firmware(bp->firmware);
10629
10630 return rc;
10631}
10632
10633static void bnx2x_release_firmware(struct bnx2x *bp)
10634{
10635 kfree(bp->init_ops_offsets);
10636 kfree(bp->init_ops);
10637 kfree(bp->init_data);
10638 release_firmware(bp->firmware);
10639}
10640
10641
10642static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
10643 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
10644 .init_hw_cmn = bnx2x_init_hw_common,
10645 .init_hw_port = bnx2x_init_hw_port,
10646 .init_hw_func = bnx2x_init_hw_func,
10647
10648 .reset_hw_cmn = bnx2x_reset_common,
10649 .reset_hw_port = bnx2x_reset_port,
10650 .reset_hw_func = bnx2x_reset_func,
10651
10652 .gunzip_init = bnx2x_gunzip_init,
10653 .gunzip_end = bnx2x_gunzip_end,
10654
10655 .init_fw = bnx2x_init_firmware,
10656 .release_fw = bnx2x_release_firmware,
10657};
10658
10659void bnx2x__init_func_obj(struct bnx2x *bp)
10660{
10661 /* Prepare DMAE related driver resources */
10662 bnx2x_setup_dmae(bp);
10663
10664 bnx2x_init_func_obj(bp, &bp->func_obj,
10665 bnx2x_sp(bp, func_rdata),
10666 bnx2x_sp_mapping(bp, func_rdata),
10667 &bnx2x_func_sp_drv);
10668}
10669
10670/* must be called after sriov-enable */
10671static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
10672{
10673 int cid_count = BNX2X_L2_CID_COUNT(bp);
10674
10675#ifdef BCM_CNIC
10676 cid_count += CNIC_CID_MAX;
10677#endif
10678 return roundup(cid_count, QM_CID_ROUND);
10679}
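/*
 * Example with illustrative numbers only: if the L2 and CNIC CIDs sum
 * to 326 and QM_CID_ROUND happened to be 1024, the function would
 * return roundup(326, 1024) == 1024, i.e. the QM CID space is always
 * padded up to a whole multiple of QM_CID_ROUND.
 */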
10680
10681/**
10682 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
10683 *
10684 * @pdev: PCI device
10685 *
10686 */
10687static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
10688{
10689 int pos;
10690 u16 control;
10691
10692 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
10693
10694 /*
10695	 * If MSI-X is not supported, return the number of SBs needed to
10696	 * support one fast path queue: one FP queue + an SB for CNIC
10697 */
10698 if (!pos)
10699 return 1 + CNIC_PRESENT;
10700
10701 /*
10702 * The value in the PCI configuration space is the index of the last
10703 * entry, namely one less than the actual size of the table, which is
10704 * exactly what we want to return from this function: number of all SBs
10705 * without the default SB.
10706 */
10707 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
10708 return control & PCI_MSIX_FLAGS_QSIZE;
10709}
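/*
 * Example: a QSIZE field of 16 means a 17-entry MSI-X table; one entry
 * serves the default SB, so the function returns 16 - the number of
 * usable non-default SBs.
 */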
10710
10711static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10712 const struct pci_device_id *ent)
10713{
10714 struct net_device *dev = NULL;
10715 struct bnx2x *bp;
10716 int pcie_width, pcie_speed;
10717 int rc, max_non_def_sbs;
10718 int rx_count, tx_count, rss_count;
10719 /*
10720 * An estimated maximum supported CoS number according to the chip
10721 * version.
10722 * We will try to roughly estimate the maximum number of CoSes this chip
10723 * may support in order to minimize the memory allocated for Tx
10724	 * netdev_queues. This number will be accurately calculated during the
10725	 * initialization of bp->max_cos based on the chip version and chip
10726	 * revision in bnx2x_init_bp().
10727 */
10728 u8 max_cos_est = 0;
10729
10730 switch (ent->driver_data) {
10731 case BCM57710:
10732 case BCM57711:
10733 case BCM57711E:
10734 max_cos_est = BNX2X_MULTI_TX_COS_E1X;
10735 break;
10736
10737 case BCM57712:
10738 case BCM57712_MF:
10739 max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
10740 break;
10741
10742 case BCM57800:
10743 case BCM57800_MF:
10744 case BCM57810:
10745 case BCM57810_MF:
10746 case BCM57840:
10747 case BCM57840_MF:
10748 max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
10749 break;
10750
10751 default:
10752 pr_err("Unknown board_type (%ld), aborting\n",
10753 ent->driver_data);
10754 return -ENODEV;
10755 }
10756
10757 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
10758
10759 /* !!! FIXME !!!
10760 * Do not allow the maximum SB count to grow above 16
10761	 * since Special CIDs start from 16*BNX2X_MULTI_TX_COS=48.
10762 * We will use the FP_SB_MAX_E1x macro for this matter.
10763 */
10764 max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
10765
10766 WARN_ON(!max_non_def_sbs);
10767
10768 /* Maximum number of RSS queues: one IGU SB goes to CNIC */
10769 rss_count = max_non_def_sbs - CNIC_PRESENT;
10770
10771 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
10772 rx_count = rss_count + FCOE_PRESENT;
10773
10774 /*
10775 * Maximum number of netdev Tx queues:
10776 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
10777 */
10778 tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
10779
10780 /* dev zeroed in init_etherdev */
10781 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
10782 if (!dev) {
10783 dev_err(&pdev->dev, "Cannot allocate net device\n");
10784 return -ENOMEM;
10785 }
10786
10787 bp = netdev_priv(dev);
10788
10789 DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n",
10790 tx_count, rx_count);
10791
10792 bp->igu_sb_cnt = max_non_def_sbs;
10793 bp->msg_enable = debug;
10794 pci_set_drvdata(pdev, dev);
10795
10796 rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
10797 if (rc < 0) {
10798 free_netdev(dev);
10799 return rc;
10800 }
10801
10802 DP(NETIF_MSG_DRV, "max_non_def_sbs %d", max_non_def_sbs);
10803
10804 rc = bnx2x_init_bp(bp);
10805 if (rc)
10806 goto init_one_exit;
10807
10808 /*
10809	 * Map doorbells here as we need the real value of bp->max_cos which
10810 * is initialized in bnx2x_init_bp().
10811 */
10812 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10813 min_t(u64, BNX2X_DB_SIZE(bp),
10814 pci_resource_len(pdev, 2)));
10815 if (!bp->doorbells) {
10816 dev_err(&bp->pdev->dev,
10817 "Cannot map doorbell space, aborting\n");
10818 rc = -ENOMEM;
10819 goto init_one_exit;
10820 }
10821
10822 /* calc qm_cid_count */
10823 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
10824
10825#ifdef BCM_CNIC
10826	/* disable FCOE L2 queue for E1x and E3 */
10827 if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
10828 bp->flags |= NO_FCOE_FLAG;
10829
10830#endif
10831
10832 /* Configure interrupt mode: try to enable MSI-X/MSI if
10833 * needed, set bp->num_queues appropriately.
10834 */
10835 bnx2x_set_int_mode(bp);
10836
10837 /* Add all NAPI objects */
10838 bnx2x_add_all_napi(bp);
10839
10840 rc = register_netdev(dev);
10841 if (rc) {
10842 dev_err(&pdev->dev, "Cannot register net device\n");
10843 goto init_one_exit;
10844 }
10845
10846#ifdef BCM_CNIC
10847 if (!NO_FCOE(bp)) {
10848 /* Add storage MAC address */
10849 rtnl_lock();
10850 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
10851 rtnl_unlock();
10852 }
10853#endif
10854
10855 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
10856
10857 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
10858 " IRQ %d, ", board_info[ent->driver_data].name,
10859 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10860 pcie_width,
10861 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
10862 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
10863 "5GHz (Gen2)" : "2.5GHz",
10864 dev->base_addr, bp->pdev->irq);
10865 pr_cont("node addr %pM\n", dev->dev_addr);
10866
10867 return 0;
10868
10869init_one_exit:
10870 if (bp->regview)
10871 iounmap(bp->regview);
10872
10873 if (bp->doorbells)
10874 iounmap(bp->doorbells);
10875
10876 free_netdev(dev);
10877
10878 if (atomic_read(&pdev->enable_cnt) == 1)
10879 pci_release_regions(pdev);
10880
10881 pci_disable_device(pdev);
10882 pci_set_drvdata(pdev, NULL);
10883
10884 return rc;
10885}
10886
10887static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10888{
10889 struct net_device *dev = pci_get_drvdata(pdev);
10890 struct bnx2x *bp;
10891
10892 if (!dev) {
10893 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
10894 return;
10895 }
10896 bp = netdev_priv(dev);
10897
10898#ifdef BCM_CNIC
10899 /* Delete storage MAC address */
10900 if (!NO_FCOE(bp)) {
10901 rtnl_lock();
10902 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
10903 rtnl_unlock();
10904 }
10905#endif
10906
10907#ifdef BCM_DCBNL
10908 /* Delete app tlvs from dcbnl */
10909 bnx2x_dcbnl_update_applist(bp, true);
10910#endif
10911
10912 unregister_netdev(dev);
10913
10914 /* Delete all NAPI objects */
10915 bnx2x_del_all_napi(bp);
10916
10917 /* Power on: we can't let PCI layer write to us while we are in D3 */
10918 bnx2x_set_power_state(bp, PCI_D0);
10919
10920 /* Disable MSI/MSI-X */
10921 bnx2x_disable_msi(bp);
10922
10923 /* Power off */
10924 bnx2x_set_power_state(bp, PCI_D3hot);
10925
10926 /* Make sure RESET task is not scheduled before continuing */
10927 cancel_delayed_work_sync(&bp->sp_rtnl_task);
10928
10929 if (bp->regview)
10930 iounmap(bp->regview);
10931
10932 if (bp->doorbells)
10933 iounmap(bp->doorbells);
10934
10935 bnx2x_free_mem_bp(bp);
10936
10937 free_netdev(dev);
10938
10939 if (atomic_read(&pdev->enable_cnt) == 1)
10940 pci_release_regions(pdev);
10941
10942 pci_disable_device(pdev);
10943 pci_set_drvdata(pdev, NULL);
10944}
10945
10946static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10947{
10948 int i;
10949
10950 bp->state = BNX2X_STATE_ERROR;
10951
10952 bp->rx_mode = BNX2X_RX_MODE_NONE;
10953
10954#ifdef BCM_CNIC
10955 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
10956#endif
10957 /* Stop Tx */
10958 bnx2x_tx_disable(bp);
10959
10960 bnx2x_netif_stop(bp, 0);
10961
10962 del_timer_sync(&bp->timer);
10963
10964 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10965
10966 /* Release IRQs */
10967 bnx2x_free_irq(bp);
10968
10969 /* Free SKBs, SGEs, TPA pool and driver internals */
10970 bnx2x_free_skbs(bp);
10971
10972 for_each_rx_queue(bp, i)
10973 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10974
10975 bnx2x_free_mem(bp);
10976
10977 bp->state = BNX2X_STATE_CLOSED;
10978
10979 netif_carrier_off(bp->dev);
10980
10981 return 0;
10982}
10983
10984static void bnx2x_eeh_recover(struct bnx2x *bp)
10985{
10986 u32 val;
10987
10988 mutex_init(&bp->port.phy_mutex);
10989
10990 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10991 bp->link_params.shmem_base = bp->common.shmem_base;
10992 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10993
10994 if (!bp->common.shmem_base ||
10995 (bp->common.shmem_base < 0xA0000) ||
10996 (bp->common.shmem_base >= 0xC0000)) {
10997 BNX2X_DEV_INFO("MCP not active\n");
10998 bp->flags |= NO_MCP_FLAG;
10999 return;
11000 }
11001
11002 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11003 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11004 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11005 BNX2X_ERR("BAD MCP validity signature\n");
11006
11007 if (!BP_NOMCP(bp)) {
11008 bp->fw_seq =
11009 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
11010 DRV_MSG_SEQ_NUMBER_MASK);
11011 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11012 }
11013}
11014
11015/**
11016 * bnx2x_io_error_detected - called when PCI error is detected
11017 * @pdev: Pointer to PCI device
11018 * @state: The current pci connection state
11019 *
11020 * This function is called after a PCI bus error affecting
11021 * this device has been detected.
11022 */
11023static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11024 pci_channel_state_t state)
11025{
11026 struct net_device *dev = pci_get_drvdata(pdev);
11027 struct bnx2x *bp = netdev_priv(dev);
11028
11029 rtnl_lock();
11030
11031 netif_device_detach(dev);
11032
11033 if (state == pci_channel_io_perm_failure) {
11034 rtnl_unlock();
11035 return PCI_ERS_RESULT_DISCONNECT;
11036 }
11037
11038 if (netif_running(dev))
11039 bnx2x_eeh_nic_unload(bp);
11040
11041 pci_disable_device(pdev);
11042
11043 rtnl_unlock();
11044
11045 /* Request a slot reset */
11046 return PCI_ERS_RESULT_NEED_RESET;
11047}
11048
11049/**
11050 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11051 * @pdev: Pointer to PCI device
11052 *
11053 * Restart the card from scratch, as if from a cold-boot.
11054 */
11055static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11056{
11057 struct net_device *dev = pci_get_drvdata(pdev);
11058 struct bnx2x *bp = netdev_priv(dev);
11059
11060 rtnl_lock();
11061
11062 if (pci_enable_device(pdev)) {
11063 dev_err(&pdev->dev,
11064 "Cannot re-enable PCI device after reset\n");
11065 rtnl_unlock();
11066 return PCI_ERS_RESULT_DISCONNECT;
11067 }
11068
11069 pci_set_master(pdev);
11070 pci_restore_state(pdev);
11071
11072 if (netif_running(dev))
11073 bnx2x_set_power_state(bp, PCI_D0);
11074
11075 rtnl_unlock();
11076
11077 return PCI_ERS_RESULT_RECOVERED;
11078}
11079
11080/**
11081 * bnx2x_io_resume - called when traffic can start flowing again
11082 * @pdev: Pointer to PCI device
11083 *
11084 * This callback is called when the error recovery driver tells us that
11085 * it's OK to resume normal operation.
11086 */
11087static void bnx2x_io_resume(struct pci_dev *pdev)
11088{
11089 struct net_device *dev = pci_get_drvdata(pdev);
11090 struct bnx2x *bp = netdev_priv(dev);
11091
11092 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11093 netdev_err(bp->dev, "Handling parity error recovery. "
11094 "Try again later\n");
11095 return;
11096 }
11097
11098 rtnl_lock();
11099
11100 bnx2x_eeh_recover(bp);
11101
11102 if (netif_running(dev))
11103 bnx2x_nic_load(bp, LOAD_NORMAL);
11104
11105 netif_device_attach(dev);
11106
11107 rtnl_unlock();
11108}
11109
11110static struct pci_error_handlers bnx2x_err_handler = {
11111 .error_detected = bnx2x_io_error_detected,
11112 .slot_reset = bnx2x_io_slot_reset,
11113 .resume = bnx2x_io_resume,
11114};
11115
11116static struct pci_driver bnx2x_pci_driver = {
11117 .name = DRV_MODULE_NAME,
11118 .id_table = bnx2x_pci_tbl,
11119 .probe = bnx2x_init_one,
11120 .remove = __devexit_p(bnx2x_remove_one),
11121 .suspend = bnx2x_suspend,
11122 .resume = bnx2x_resume,
11123 .err_handler = &bnx2x_err_handler,
11124};
11125
11126static int __init bnx2x_init(void)
11127{
11128 int ret;
11129
11130 pr_info("%s", version);
11131
11132 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11133 if (bnx2x_wq == NULL) {
11134 pr_err("Cannot create workqueue\n");
11135 return -ENOMEM;
11136 }
11137
11138 ret = pci_register_driver(&bnx2x_pci_driver);
11139 if (ret) {
11140 pr_err("Cannot register driver\n");
11141 destroy_workqueue(bnx2x_wq);
11142 }
11143 return ret;
11144}
11145
11146static void __exit bnx2x_cleanup(void)
11147{
11148 pci_unregister_driver(&bnx2x_pci_driver);
11149
11150 destroy_workqueue(bnx2x_wq);
11151}
11152
11153void bnx2x_notify_link_changed(struct bnx2x *bp)
11154{
11155 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
11156}
11157
11158module_init(bnx2x_init);
11159module_exit(bnx2x_cleanup);
11160
11161#ifdef BCM_CNIC
11162/**
11163 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
11164 *
11165 * @bp:		driver handle
11166 *	(the MAC to set is taken from bp->cnic_eth_dev.iscsi_mac)
11167 *
11168 * This function will wait until the ramrod completion returns.
11169 * Returns 0 on success, -ENODEV if the ramrod doesn't return.
11170 */
11171static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
11172{
11173 unsigned long ramrod_flags = 0;
11174
11175 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11176 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
11177 &bp->iscsi_l2_mac_obj, true,
11178 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
11179}
11180
11181/* count denotes the number of new completions we have seen */
11182static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
11183{
11184 struct eth_spe *spe;
11185
11186#ifdef BNX2X_STOP_ON_ERROR
11187 if (unlikely(bp->panic))
11188 return;
11189#endif
11190
11191 spin_lock_bh(&bp->spq_lock);
11192 BUG_ON(bp->cnic_spq_pending < count);
11193 bp->cnic_spq_pending -= count;
11194
11195
11196 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
11197 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
11198 & SPE_HDR_CONN_TYPE) >>
11199 SPE_HDR_CONN_TYPE_SHIFT;
11200 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
11201 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
11202
11203 /* Set validation for iSCSI L2 client before sending SETUP
11204 * ramrod
11205 */
11206 if (type == ETH_CONNECTION_TYPE) {
11207 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
11208 bnx2x_set_ctx_validation(bp, &bp->context.
11209 vcxt[BNX2X_ISCSI_ETH_CID].eth,
11210 BNX2X_ISCSI_ETH_CID);
11211 }
11212
11213 /*
11214		 * There may be no more than 8 L2 and no more than 8 L5 SPEs
11215		 * in the air. We also check that the number of outstanding
11216 * COMMON ramrods is not more than the EQ and SPQ can
11217 * accommodate.
11218 */
11219 if (type == ETH_CONNECTION_TYPE) {
11220 if (!atomic_read(&bp->cq_spq_left))
11221 break;
11222 else
11223 atomic_dec(&bp->cq_spq_left);
11224 } else if (type == NONE_CONNECTION_TYPE) {
11225 if (!atomic_read(&bp->eq_spq_left))
11226 break;
11227 else
11228 atomic_dec(&bp->eq_spq_left);
11229 } else if ((type == ISCSI_CONNECTION_TYPE) ||
11230 (type == FCOE_CONNECTION_TYPE)) {
11231 if (bp->cnic_spq_pending >=
11232 bp->cnic_eth_dev.max_kwqe_pending)
11233 break;
11234 else
11235 bp->cnic_spq_pending++;
11236 } else {
11237 BNX2X_ERR("Unknown SPE type: %d\n", type);
11238 bnx2x_panic();
11239 break;
11240 }
11241
11242 spe = bnx2x_sp_get_next(bp);
11243 *spe = *bp->cnic_kwq_cons;
11244
11245 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
11246 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
11247
11248 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
11249 bp->cnic_kwq_cons = bp->cnic_kwq;
11250 else
11251 bp->cnic_kwq_cons++;
11252 }
11253 bnx2x_sp_prod_update(bp);
11254 spin_unlock_bh(&bp->spq_lock);
11255}
11256
11257static int bnx2x_cnic_sp_queue(struct net_device *dev,
11258 struct kwqe_16 *kwqes[], u32 count)
11259{
11260 struct bnx2x *bp = netdev_priv(dev);
11261 int i;
11262
11263#ifdef BNX2X_STOP_ON_ERROR
11264 if (unlikely(bp->panic))
11265 return -EIO;
11266#endif
11267
11268 spin_lock_bh(&bp->spq_lock);
11269
11270 for (i = 0; i < count; i++) {
11271 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
11272
11273 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
11274 break;
11275
11276 *bp->cnic_kwq_prod = *spe;
11277
11278 bp->cnic_kwq_pending++;
11279
11280 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
11281 spe->hdr.conn_and_cmd_data, spe->hdr.type,
11282 spe->data.update_data_addr.hi,
11283 spe->data.update_data_addr.lo,
11284 bp->cnic_kwq_pending);
11285
11286 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
11287 bp->cnic_kwq_prod = bp->cnic_kwq;
11288 else
11289 bp->cnic_kwq_prod++;
11290 }
11291
11292 spin_unlock_bh(&bp->spq_lock);
11293
11294 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
11295 bnx2x_cnic_sp_post(bp, 0);
11296
11297 return i;
11298}
11299
11300static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
11301{
11302 struct cnic_ops *c_ops;
11303 int rc = 0;
11304
11305 mutex_lock(&bp->cnic_mutex);
11306 c_ops = rcu_dereference_protected(bp->cnic_ops,
11307 lockdep_is_held(&bp->cnic_mutex));
11308 if (c_ops)
11309 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
11310 mutex_unlock(&bp->cnic_mutex);
11311
11312 return rc;
11313}
11314
11315static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
11316{
11317 struct cnic_ops *c_ops;
11318 int rc = 0;
11319
11320 rcu_read_lock();
11321 c_ops = rcu_dereference(bp->cnic_ops);
11322 if (c_ops)
11323 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
11324 rcu_read_unlock();
11325
11326 return rc;
11327}
11328
11329/*
11330 * for commands that have no data
11331 */
11332int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
11333{
11334 struct cnic_ctl_info ctl = {0};
11335
11336 ctl.cmd = cmd;
11337
11338 return bnx2x_cnic_ctl_send(bp, &ctl);
11339}
11340
11341static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
11342{
11343 struct cnic_ctl_info ctl = {0};
11344
11345 /* first we tell CNIC and only then we count this as a completion */
11346 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
11347 ctl.data.comp.cid = cid;
11348 ctl.data.comp.error = err;
11349
11350 bnx2x_cnic_ctl_send_bh(bp, &ctl);
11351 bnx2x_cnic_sp_post(bp, 0);
11352}
11353
11354
11355/* Called with netif_addr_lock_bh() taken.
11356 * Sets an rx_mode config for an iSCSI ETH client.
11357 * Doesn't block.
11358 * Completion should be checked outside.
11359 */
11360static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
11361{
11362 unsigned long accept_flags = 0, ramrod_flags = 0;
11363 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
11364 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
11365
11366 if (start) {
11367		/* Start accepting on the iSCSI L2 ring. Accept all multicasts
11368		 * because it's the only way for the UIO queue to accept
11369		 * multicasts (in non-promiscuous mode only one queue per
11370		 * function will receive multicast packets - the leading
11371		 * queue in our case).
11372 */
11373 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
11374 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
11375 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
11376 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
11377
11378 /* Clear STOP_PENDING bit if START is requested */
11379 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
11380
11381 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
11382 } else
11383 /* Clear START_PENDING bit if STOP is requested */
11384 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
11385
11386 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
11387 set_bit(sched_state, &bp->sp_state);
11388 else {
11389 __set_bit(RAMROD_RX, &ramrod_flags);
11390 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
11391 ramrod_flags);
11392 }
11393}
11394
11395
11396static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
11397{
11398 struct bnx2x *bp = netdev_priv(dev);
11399 int rc = 0;
11400
11401 switch (ctl->cmd) {
11402 case DRV_CTL_CTXTBL_WR_CMD: {
11403 u32 index = ctl->data.io.offset;
11404 dma_addr_t addr = ctl->data.io.dma_addr;
11405
11406 bnx2x_ilt_wr(bp, index, addr);
11407 break;
11408 }
11409
11410 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
11411 int count = ctl->data.credit.credit_count;
11412
11413 bnx2x_cnic_sp_post(bp, count);
11414 break;
11415 }
11416
11417 /* rtnl_lock is held. */
11418 case DRV_CTL_START_L2_CMD: {
11419 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
11420 unsigned long sp_bits = 0;
11421
11422 /* Configure the iSCSI classification object */
11423 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
11424 cp->iscsi_l2_client_id,
11425 cp->iscsi_l2_cid, BP_FUNC(bp),
11426 bnx2x_sp(bp, mac_rdata),
11427 bnx2x_sp_mapping(bp, mac_rdata),
11428 BNX2X_FILTER_MAC_PENDING,
11429 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
11430 &bp->macs_pool);
11431
11432 /* Set iSCSI MAC address */
11433 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
11434 if (rc)
11435 break;
11436
11437 mmiowb();
11438 barrier();
11439
11440 /* Start accepting on iSCSI L2 ring */
11441
11442 netif_addr_lock_bh(dev);
11443 bnx2x_set_iscsi_eth_rx_mode(bp, true);
11444 netif_addr_unlock_bh(dev);
11445
11446 /* bits to wait on */
11447 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
11448 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
11449
11450 if (!bnx2x_wait_sp_comp(bp, sp_bits))
11451 BNX2X_ERR("rx_mode completion timed out!\n");
11452
11453 break;
11454 }
11455
11456 /* rtnl_lock is held. */
11457 case DRV_CTL_STOP_L2_CMD: {
11458 unsigned long sp_bits = 0;
11459
11460 /* Stop accepting on iSCSI L2 ring */
11461 netif_addr_lock_bh(dev);
11462 bnx2x_set_iscsi_eth_rx_mode(bp, false);
11463 netif_addr_unlock_bh(dev);
11464
11465 /* bits to wait on */
11466 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
11467 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
11468
11469 if (!bnx2x_wait_sp_comp(bp, sp_bits))
11470 BNX2X_ERR("rx_mode completion timed out!\n");
11471
11472 mmiowb();
11473 barrier();
11474
11475 /* Unset iSCSI L2 MAC */
11476 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
11477 BNX2X_ISCSI_ETH_MAC, true);
11478 break;
11479 }
11480 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
11481 int count = ctl->data.credit.credit_count;
11482
11483 smp_mb__before_atomic_inc();
11484 atomic_add(count, &bp->cq_spq_left);
11485 smp_mb__after_atomic_inc();
11486 break;
11487 }
11488
11489 default:
11490 BNX2X_ERR("unknown command %x\n", ctl->cmd);
11491 rc = -EINVAL;
11492 }
11493
11494 return rc;
11495}
11496
11497void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
11498{
11499 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
11500
11501 if (bp->flags & USING_MSIX_FLAG) {
11502 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
11503 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
11504 cp->irq_arr[0].vector = bp->msix_table[1].vector;
11505 } else {
11506 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
11507 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
11508 }
11509 if (!CHIP_IS_E1x(bp))
11510 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
11511 else
11512 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
11513
11514 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
11515 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
11516 cp->irq_arr[1].status_blk = bp->def_status_blk;
11517 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
11518 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
11519
11520 cp->num_irq = 2;
11521}
11522
11523static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
11524 void *data)
11525{
11526 struct bnx2x *bp = netdev_priv(dev);
11527 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
11528
11529 if (ops == NULL)
11530 return -EINVAL;
11531
11532 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
11533 if (!bp->cnic_kwq)
11534 return -ENOMEM;
11535
11536 bp->cnic_kwq_cons = bp->cnic_kwq;
11537 bp->cnic_kwq_prod = bp->cnic_kwq;
11538 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
11539
11540 bp->cnic_spq_pending = 0;
11541 bp->cnic_kwq_pending = 0;
11542
11543 bp->cnic_data = data;
11544
11545 cp->num_irq = 0;
11546 cp->drv_state |= CNIC_DRV_STATE_REGD;
11547 cp->iro_arr = bp->iro_arr;
11548
11549 bnx2x_setup_cnic_irq_info(bp);
11550
11551 rcu_assign_pointer(bp->cnic_ops, ops);
11552
11553 return 0;
11554}
11555
11556static int bnx2x_unregister_cnic(struct net_device *dev)
11557{
11558 struct bnx2x *bp = netdev_priv(dev);
11559 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
11560
11561 mutex_lock(&bp->cnic_mutex);
11562 cp->drv_state = 0;
11563 rcu_assign_pointer(bp->cnic_ops, NULL);
11564 mutex_unlock(&bp->cnic_mutex);
11565 synchronize_rcu();
11566 kfree(bp->cnic_kwq);
11567 bp->cnic_kwq = NULL;
11568
11569 return 0;
11570}
11571
11572struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
11573{
11574 struct bnx2x *bp = netdev_priv(dev);
11575 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
11576
11577 /* If both iSCSI and FCoE are disabled - return NULL in
11578	 * order to indicate to CNIC that it should not try to work
11579 * with this device.
11580 */
11581 if (NO_ISCSI(bp) && NO_FCOE(bp))
11582 return NULL;
11583
11584 cp->drv_owner = THIS_MODULE;
11585 cp->chip_id = CHIP_ID(bp);
11586 cp->pdev = bp->pdev;
11587 cp->io_base = bp->regview;
11588 cp->io_base2 = bp->doorbells;
11589 cp->max_kwqe_pending = 8;
11590 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
11591 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
11592 bnx2x_cid_ilt_lines(bp);
11593 cp->ctx_tbl_len = CNIC_ILT_LINES;
11594 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
11595 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
11596 cp->drv_ctl = bnx2x_drv_ctl;
11597 cp->drv_register_cnic = bnx2x_register_cnic;
11598 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
11599 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
11600 cp->iscsi_l2_client_id =
11601 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
11602 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
11603
11604 if (NO_ISCSI_OOO(bp))
11605 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
11606
11607 if (NO_ISCSI(bp))
11608 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
11609
11610 if (NO_FCOE(bp))
11611 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
11612
11613 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
11614 "starting cid %d\n",
11615 cp->ctx_blk_size,
11616 cp->ctx_tbl_offset,
11617 cp->ctx_tbl_len,
11618 cp->starting_cid);
11619 return cp;
11620}
11621EXPORT_SYMBOL(bnx2x_cnic_probe);
11622
11623#endif /* BCM_CNIC */
11624
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
new file mode 100644
index 00000000000..fc7bd0f23c0
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -0,0 +1,7177 @@
1/* bnx2x_reg.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * The registers description starts with the register Access type followed
10 * by size in bits. For example [RW 32]. The access types are:
11 * R - Read only
12 * RC - Clear on read
13 * RW - Read/Write
14 * ST - Statistics register (clear on read)
15 * W - Write only
16 * WB - Wide bus register - the size is over 32 bits and it should be
17 * read/write in consecutive 32 bits accesses
18 * WR - Write Clear (write 1 to clear the bit)
19 *
20 */
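/*
 * Reading the legend on concrete entries below: ATC_REG_ATC_INIT_DONE
 * is tagged [R 1], a read-only 1-bit register, while
 * ATC_REG_ATC_INT_STS_CLR is tagged [RC 6], a 6-bit interrupt status
 * register whose bits clear on read.
 */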
21#ifndef BNX2X_REG_H
22#define BNX2X_REG_H
23
24#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
25#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
26#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1<<5)
27#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1<<3)
28#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1<<4)
29#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1)
30/* [RW 1] Initiate the ATC array - reset all the valid bits */
31#define ATC_REG_ATC_INIT_ARRAY 0x1100b8
32/* [R 1] ATC initalization done */
33#define ATC_REG_ATC_INIT_DONE 0x1100bc
34/* [RC 6] Interrupt register #0 read clear */
35#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
36/* [RW 5] Parity mask register #0 read/write */
37#define ATC_REG_ATC_PRTY_MASK 0x1101d8
38/* [RC 5] Parity register #0 read clear */
39#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0
40/* [RW 19] Interrupt mask register #0 read/write */
41#define BRB1_REG_BRB1_INT_MASK 0x60128
42/* [R 19] Interrupt register #0 read */
43#define BRB1_REG_BRB1_INT_STS 0x6011c
44/* [RW 4] Parity mask register #0 read/write */
45#define BRB1_REG_BRB1_PRTY_MASK 0x60138
46/* [R 4] Parity register #0 read */
47#define BRB1_REG_BRB1_PRTY_STS 0x6012c
48/* [RC 4] Parity register #0 read clear */
49#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
50/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
51 * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
52 * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
53 * following reset, the first RBC access to this reg must be a write; there
54 * can be no more RBC writes after the first one; any number of RBC reads
55 * may follow the first write; RBC access not following these rules will
56 * result in a hang condition. */
57#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200
58/* [RW 10] The number of free blocks below which the full signal to class 0
59 * is asserted */
60#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0
61#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1 0x60230
62/* [RW 11] The number of free blocks above which the full signal to class 0
63 * is de-asserted */
64#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4
65#define BRB1_REG_FULL_0_XON_THRESHOLD_1 0x60234
66/* [RW 11] The number of free blocks below which the full signal to class 1
67 * is asserted */
68#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8
69#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1 0x60238
70/* [RW 11] The number of free blocks above which the full signal to class 1
71 * is de-asserted */
72#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc
73#define BRB1_REG_FULL_1_XON_THRESHOLD_1 0x6023c
74/* [RW 11] The number of free blocks below which the full signal to the LB
75 * port is asserted */
76#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0
77/* [RW 10] The number of free blocks above which the full signal to the LB
78 * port is de-asserted */
79#define BRB1_REG_FULL_LB_XON_THRESHOLD 0x601e4
80/* [RW 10] The number of free blocks above which the High_llfc signal to
81 interface #n is de-asserted. */
82#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c
83/* [RW 10] The number of free blocks below which the High_llfc signal to
84 interface #n is asserted. */
85#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c
86/* [RW 11] The number of blocks guarantied for the LB port */
87#define BRB1_REG_LB_GUARANTIED 0x601ec
88/* [RW 11] The hysteresis on the guarantied buffer space for the Lb port
89 * before signaling XON. */
90#define BRB1_REG_LB_GUARANTIED_HYST 0x60264
91/* [RW 24] LL RAM data. */
92#define BRB1_REG_LL_RAM 0x61000
93/* [RW 10] The number of free blocks above which the Low_llfc signal to
94 interface #n is de-asserted. */
95#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c
96/* [RW 10] The number of free blocks below which the Low_llfc signal to
97 interface #n is asserted. */
98#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c
99/* [RW 11] The number of blocks guaranteed for class 0 in MAC 0. The
100 * register is applicable only when per_class_guaranty_mode is set. */
101#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED 0x60244
102/* [RW 11] The hysteresis on the guaranteed buffer space for class 0 in MAC
103 * 0 before signaling XON. The register is applicable only when
104 * per_class_guaranty_mode is set. */
105#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST 0x60254
106/* [RW 11] The number of blocks guaranteed for class 1 in MAC 0. The
107 * register is applicable only when per_class_guaranty_mode is set. */
108#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED 0x60248
109/* [RW 11] The hysteresis on the guaranteed buffer space for class 1 in MAC 0
110 * before signaling XON. The register is applicable only when
111 * per_class_guaranty_mode is set. */
112#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST 0x60258
113/* [RW 11] The number of blocks guaranteed for class 0 in MAC 1. The register
114 * is applicable only when per_class_guaranty_mode is set. */
115#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED 0x6024c
116/* [RW 11] The hysteresis on the guaranteed buffer space for class 0 in MAC
117 * 1 before signaling XON. The register is applicable only when
118 * per_class_guaranty_mode is set. */
119#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST 0x6025c
120/* [RW 11] The number of blocks guaranteed for class 1 in MAC 1. The
121 * register is applicable only when per_class_guaranty_mode is set. */
122#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED 0x60250
123/* [RW 11] The hysteresis on the guaranteed buffer space for class 1 in MAC
124 * 1 before signaling XON. The register is applicable only when
125 * per_class_guaranty_mode is set. */
126#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST 0x60260
127/* [RW 11] The number of blocks guaranteed for the MAC port. The register is
128 * applicable only when per_class_guaranty_mode is reset. */
129#define BRB1_REG_MAC_GUARANTIED_0 0x601e8
130#define BRB1_REG_MAC_GUARANTIED_1 0x60240
131/* [R 24] The number of full blocks. */
132#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090
133/* [ST 32] The number of cycles that the write_full signal towards MAC #0
134 was asserted. */
135#define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8
136#define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc
137#define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8
138/* [ST 32] The number of cycles that the pause signal towards MAC #0 was
139 asserted. */
140#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8
141#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc
142/* [RW 10] The number of free blocks below which the pause signal to class 0
143 * is asserted */
144#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0
145#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 0x60220
146/* [RW 11] The number of free blocks above which the pause signal to class 0
147 * is de-asserted */
148#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4
149#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1 0x60224
150/* [RW 11] The number of free blocks below which the pause signal to class 1
151 * is asserted */
152#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8
153#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 0x60228
154/* [RW 11] The number of free blocks above which the pause signal to class 1
155 * is de-asserted */
156#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc
157#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1 0x6022c
158/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
159#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
160#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
161/* [RW 10] Write client 0: Assert pause threshold. */
162#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
163#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
164/* [R 24] The number of full blocks occupied by port. */
165#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
166/* [RW 1] Reset the design by software. */
167#define BRB1_REG_SOFT_RESET 0x600dc
168/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
169#define CCM_REG_CAM_OCCUP 0xd0188
170/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
171 acknowledge output is deasserted; all other signals are treated as usual;
172 if 1 - normal activity. */
173#define CCM_REG_CCM_CFC_IFEN 0xd003c
174/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
175 disregarded; valid is deasserted; all other signals are treated as usual;
176 if 1 - normal activity. */
177#define CCM_REG_CCM_CQM_IFEN 0xd000c
178/* [RW 1] If set the Q index; received from the QM is inserted to event ID.
179 Otherwise 0 is inserted. */
180#define CCM_REG_CCM_CQM_USE_Q 0xd00c0
181/* [RW 11] Interrupt mask register #0 read/write */
182#define CCM_REG_CCM_INT_MASK 0xd01e4
183/* [R 11] Interrupt register #0 read */
184#define CCM_REG_CCM_INT_STS 0xd01d8
185/* [RW 27] Parity mask register #0 read/write */
186#define CCM_REG_CCM_PRTY_MASK 0xd01f4
187/* [R 27] Parity register #0 read */
188#define CCM_REG_CCM_PRTY_STS 0xd01e8
189/* [RC 27] Parity register #0 read clear */
190#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
191/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
192 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
193 Is used to determine the number of the AG context REG-pairs written back;
194 when the input message Reg1WbFlg isn't set. */
195#define CCM_REG_CCM_REG0_SZ 0xd00c4
196/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
197 disregarded; valid is deasserted; all other signals are treated as usual;
198 if 1 - normal activity. */
199#define CCM_REG_CCM_STORM0_IFEN 0xd0004
200/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
201 disregarded; valid is deasserted; all other signals are treated as usual;
202 if 1 - normal activity. */
203#define CCM_REG_CCM_STORM1_IFEN 0xd0008
204/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
205 disregarded; valid output is deasserted; all other signals are treated as
206 usual; if 1 - normal activity. */
207#define CCM_REG_CDU_AG_RD_IFEN 0xd0030
208/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
209 are disregarded; all other signals are treated as usual; if 1 - normal
210 activity. */
211#define CCM_REG_CDU_AG_WR_IFEN 0xd002c
212/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
213 disregarded; valid output is deasserted; all other signals are treated as
214 usual; if 1 - normal activity. */
215#define CCM_REG_CDU_SM_RD_IFEN 0xd0038
216/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
217 input is disregarded; all other signals are treated as usual; if 1 -
218 normal activity. */
219#define CCM_REG_CDU_SM_WR_IFEN 0xd0034
220/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
221 the initial credit value; read returns the current value of the credit
222 counter. Must be initialized to 1 at start-up. */
223#define CCM_REG_CFC_INIT_CRD 0xd0204
224/* [RW 2] Auxiliary counter flag Q number 1. */
225#define CCM_REG_CNT_AUX1_Q 0xd00c8
226/* [RW 2] Auxiliary counter flag Q number 2. */
227#define CCM_REG_CNT_AUX2_Q 0xd00cc
228/* [RW 28] The CM header value for QM request (primary). */
229#define CCM_REG_CQM_CCM_HDR_P 0xd008c
230/* [RW 28] The CM header value for QM request (secondary). */
231#define CCM_REG_CQM_CCM_HDR_S 0xd0090
232/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
233 acknowledge output is deasserted; all other signals are treated as usual;
234 if 1 - normal activity. */
235#define CCM_REG_CQM_CCM_IFEN 0xd0014
236/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
237 the initial credit value; read returns the current value of the credit
238 counter. Must be initialized to 32 at start-up. */
239#define CCM_REG_CQM_INIT_CRD 0xd020c
240/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
241 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
242 prioritised); 2 stands for weight 2; etc. */
243#define CCM_REG_CQM_P_WEIGHT 0xd00b8
244/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
245 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
246 prioritised); 2 stands for weight 2; etc. */
247#define CCM_REG_CQM_S_WEIGHT 0xd00bc
248/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
249 acknowledge output is deasserted; all other signals are treated as usual;
250 if 1 - normal activity. */
251#define CCM_REG_CSDM_IFEN 0xd0018
252/* [RC 1] Set when the message length mismatch (relative to last indication)
253 at the SDM interface is detected. */
254#define CCM_REG_CSDM_LENGTH_MIS 0xd0170
255/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
256 weight 8 (the most prioritised); 1 stands for weight 1 (least
257 prioritised); 2 stands for weight 2; etc. */
258#define CCM_REG_CSDM_WEIGHT 0xd00b4
259/* [RW 28] The CM header for QM formatting in case of an error in the QM
260 inputs. */
261#define CCM_REG_ERR_CCM_HDR 0xd0094
262/* [RW 8] The Event ID in case the input message ErrorFlg is set. */
263#define CCM_REG_ERR_EVNT_ID 0xd0098
264/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
265 writes the initial credit value; read returns the current value of the
266 credit counter. Must be initialized to 64 at start-up. */
267#define CCM_REG_FIC0_INIT_CRD 0xd0210
268/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
269 writes the initial credit value; read returns the current value of the
270 credit counter. Must be initialized to 64 at start-up. */
271#define CCM_REG_FIC1_INIT_CRD 0xd0214
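/* Illustrative sketch: the CCM credit registers above must be seeded with
 * the start-up values their comments mandate (1, 32, 64 and 64
 * respectively). A minimal illustration, assuming the REG_WR() helper from
 * bnx2x.h; in the real driver these writes come from the init tool. */
static inline void ccm_seed_init_credits(struct bnx2x *bp)
{
	REG_WR(bp, CCM_REG_CFC_INIT_CRD, 1);	/* "initialized to 1" */
	REG_WR(bp, CCM_REG_CQM_INIT_CRD, 32);	/* "initialized to 32" */
	REG_WR(bp, CCM_REG_FIC0_INIT_CRD, 64);	/* "initialized to 64" */
	REG_WR(bp, CCM_REG_FIC1_INIT_CRD, 64);	/* "initialized to 64" */
}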
272/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
273 - strict priority defined by ~ccm_registers_gr_ag_pr.gr_ag_pr;
274 ~ccm_registers_gr_ld0_pr.gr_ld0_pr and
275 ~ccm_registers_gr_ld1_pr.gr_ld1_pr. Groups are according to channels and
276 outputs to STORM: aggregation; load FIC0; load FIC1 and store. */
277#define CCM_REG_GR_ARB_TYPE 0xd015c
278/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
279 highest priority is 3. It is assumed that the Store channel priority is
280 the complement to 4 of the other priorities - Aggregation channel; Load
281 (FIC0) channel and Load (FIC1). */
282#define CCM_REG_GR_LD0_PR 0xd0164
283/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
284 highest priority is 3. It is assumed that the Store channel priority is
285 the complement to 4 of the other priorities - Aggregation channel; Load
286 (FIC0) channel and Load (FIC1). */
287#define CCM_REG_GR_LD1_PR 0xd0168
288/* [RW 2] General flags index. */
289#define CCM_REG_INV_DONE_Q 0xd0108
290/* [RW 4] The number of double REG-pairs(128 bits); loaded from the STORM
291 context and sent to STORM; for a specific connection type. The double
292 REG-pairs are used in order to align to STORM context row size of 128
293 bits. The offset of these data in the STORM context is always 0. Index
294 _(0..15) stands for the connection type (one of 16). */
295#define CCM_REG_N_SM_CTX_LD_0 0xd004c
296#define CCM_REG_N_SM_CTX_LD_1 0xd0050
297#define CCM_REG_N_SM_CTX_LD_2 0xd0054
298#define CCM_REG_N_SM_CTX_LD_3 0xd0058
299#define CCM_REG_N_SM_CTX_LD_4 0xd005c
300/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
301 acknowledge output is deasserted; all other signals are treated as usual;
302 if 1 - normal activity. */
303#define CCM_REG_PBF_IFEN 0xd0028
304/* [RC 1] Set when the message length mismatch (relative to last indication)
305 at the pbf interface is detected. */
306#define CCM_REG_PBF_LENGTH_MIS 0xd0180
307/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
308 weight 8 (the most prioritised); 1 stands for weight 1 (least
309 prioritised); 2 stands for weight 2; etc. */
310#define CCM_REG_PBF_WEIGHT 0xd00ac
311#define CCM_REG_PHYS_QNUM1_0 0xd0134
312#define CCM_REG_PHYS_QNUM1_1 0xd0138
313#define CCM_REG_PHYS_QNUM2_0 0xd013c
314#define CCM_REG_PHYS_QNUM2_1 0xd0140
315#define CCM_REG_PHYS_QNUM3_0 0xd0144
316#define CCM_REG_PHYS_QNUM3_1 0xd0148
317#define CCM_REG_QOS_PHYS_QNUM0_0 0xd0114
318#define CCM_REG_QOS_PHYS_QNUM0_1 0xd0118
319#define CCM_REG_QOS_PHYS_QNUM1_0 0xd011c
320#define CCM_REG_QOS_PHYS_QNUM1_1 0xd0120
321#define CCM_REG_QOS_PHYS_QNUM2_0 0xd0124
322#define CCM_REG_QOS_PHYS_QNUM2_1 0xd0128
323#define CCM_REG_QOS_PHYS_QNUM3_0 0xd012c
324#define CCM_REG_QOS_PHYS_QNUM3_1 0xd0130
325/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
326 disregarded; acknowledge output is deasserted; all other signals are
327 treated as usual; if 1 - normal activity. */
328#define CCM_REG_STORM_CCM_IFEN 0xd0010
329/* [RC 1] Set when the message length mismatch (relative to last indication)
330 at the STORM interface is detected. */
331#define CCM_REG_STORM_LENGTH_MIS 0xd016c
332/* [RW 3] The weight of the STORM input in the WRR (Weighted Round robin)
333 mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for
334 weight 1 (least prioritised); 2 stands for weight 2 (more prioritised);
335 etc. */
336#define CCM_REG_STORM_WEIGHT 0xd009c
337/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
338 disregarded; acknowledge output is deasserted; all other signals are
339 treated as usual; if 1 - normal activity. */
340#define CCM_REG_TSEM_IFEN 0xd001c
341/* [RC 1] Set when the message length mismatch (relative to last indication)
342 at the tsem interface is detected. */
343#define CCM_REG_TSEM_LENGTH_MIS 0xd0174
344/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
345 weight 8 (the most prioritised); 1 stands for weight 1 (least
346 prioritised); 2 stands for weight 2; etc. */
347#define CCM_REG_TSEM_WEIGHT 0xd00a0
348/* [RW 1] Input usem Interface enable. If 0 - the valid input is
349 disregarded; acknowledge output is deasserted; all other signals are
350 treated as usual; if 1 - normal activity. */
351#define CCM_REG_USEM_IFEN 0xd0024
352/* [RC 1] Set when message length mismatch (relative to last indication) at
353 the usem interface is detected. */
354#define CCM_REG_USEM_LENGTH_MIS 0xd017c
355/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
356 weight 8 (the most prioritised); 1 stands for weight 1 (least
357 prioritised); 2 stands for weight 2; etc. */
358#define CCM_REG_USEM_WEIGHT 0xd00a8
359/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
360 disregarded; acknowledge output is deasserted; all other signals are
361 treated as usual; if 1 - normal activity. */
362#define CCM_REG_XSEM_IFEN 0xd0020
363/* [RC 1] Set when the message length mismatch (relative to last indication)
364 at the xsem interface is detected. */
365#define CCM_REG_XSEM_LENGTH_MIS 0xd0178
366/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
367 weight 8 (the most prioritised); 1 stands for weight 1 (least
368 prioritised); 2 stands for weight 2; etc. */
369#define CCM_REG_XSEM_WEIGHT 0xd00a4
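/* Illustrative helper: all the *_WEIGHT registers above share one WRR
 * encoding - a register value of 0 selects the maximum weight of 8 and any
 * non-zero value N selects weight N. A hypothetical helper that captures
 * the rule: */
static inline u32 ccm_wrr_encode_weight(u32 weight)	/* weight in 1..8 */
{
	return (weight == 8) ? 0 : weight;
}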
370/* [RW 19] Indirect access to the descriptor table of the XX protection
371 mechanism. The fields are: [5:0] - message length; [12:6] - message
372 pointer; [18:13] - next pointer. */
373#define CCM_REG_XX_DESCR_TABLE 0xd0300
374#define CCM_REG_XX_DESCR_TABLE_SIZE 24
375/* [R 7] Used to read the value of XX protection Free counter. */
376#define CCM_REG_XX_FREE 0xd0184
377/* [RW 6] Initial value for the credit counter; responsible for filling
378 the Input Stage XX protection buffer with the XX protection pending
379 messages. Max credit available - 127. Write writes the initial credit
380 value; read returns the current value of the credit counter. Must be
381 initialized to maximum XX protected message size - 2 at start-up. */
382#define CCM_REG_XX_INIT_CRD 0xd0220
383/* [RW 7] The maximum number of pending messages; which may be stored in XX
384 protection. A read returns the ~ccm_registers_xx_free.xx_free counter; a
385 write sets the start value of the ~ccm_registers_xx_free.xx_free
386 counter. */
387#define CCM_REG_XX_MSG_NUM 0xd0224
388/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
389#define CCM_REG_XX_OVFL_EVNT_ID 0xd0044
390/* [RW 18] Indirect access to the XX table of the XX protection mechanism.
391 The fields are: [5:0] - tail pointer; [11:6] - Link List size; [17:12] -
392 header pointer. */
393#define CCM_REG_XX_TABLE 0xd0280
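/* Illustrative macros: unpacking the indirectly-accessed table entries per
 * the field layouts documented above. The macro names are hypothetical and
 * are not part of the HSI. */
#define CCM_XX_DESCR_MSG_LENGTH(e)	(((e) >> 0) & 0x3f)	/* [5:0] */
#define CCM_XX_DESCR_MSG_PTR(e)		(((e) >> 6) & 0x7f)	/* [12:6] */
#define CCM_XX_DESCR_NEXT_PTR(e)	(((e) >> 13) & 0x3f)	/* [18:13] */
#define CCM_XX_TABLE_TAIL_PTR(e)	(((e) >> 0) & 0x3f)	/* [5:0] */
#define CCM_XX_TABLE_LL_SIZE(e)		(((e) >> 6) & 0x3f)	/* [11:6] */
#define CCM_XX_TABLE_HDR_PTR(e)		(((e) >> 12) & 0x3f)	/* [17:12] */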
394#define CDU_REG_CDU_CHK_MASK0 0x101000
395#define CDU_REG_CDU_CHK_MASK1 0x101004
396#define CDU_REG_CDU_CONTROL0 0x101008
397#define CDU_REG_CDU_DEBUG 0x101010
398#define CDU_REG_CDU_GLOBAL_PARAMS 0x101020
399/* [RW 7] Interrupt mask register #0 read/write */
400#define CDU_REG_CDU_INT_MASK 0x10103c
401/* [R 7] Interrupt register #0 read */
402#define CDU_REG_CDU_INT_STS 0x101030
403/* [RW 5] Parity mask register #0 read/write */
404#define CDU_REG_CDU_PRTY_MASK 0x10104c
405/* [R 5] Parity register #0 read */
406#define CDU_REG_CDU_PRTY_STS 0x101040
407/* [RC 5] Parity register #0 read clear */
408#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
409/* [RC 32] logging of error data in case of a CDU load error:
410 {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0];
411 active_error; type_error; actual_active; actual_compressed_context}; */
412#define CDU_REG_ERROR_DATA 0x101014
413/* [WB 216] L1TT ram access. Each entry has the following format:
414 {merge_regions[7:0]; offset12[5:0]...offset0[5:0];
415 length12[5:0]...length0[5:0]; id12[3:0]...id0[3:0]} */
416#define CDU_REG_L1TT 0x101800
417/* [WB 24] MATT ram access. Each entry has the following
418 format: {RegionLength[11:0]; RegionOffset[11:0]} */
419#define CDU_REG_MATT 0x101100
420/* [RW 1] when this bit is set the CDU operates in e1hmf mode */
421#define CDU_REG_MF_MODE 0x101050
422/* [R 1] indication that initialization of the activity counter by the
423 hardware was done. */
424#define CFC_REG_AC_INIT_DONE 0x104078
425/* [RW 13] activity counter ram access */
426#define CFC_REG_ACTIVITY_COUNTER 0x104400
427#define CFC_REG_ACTIVITY_COUNTER_SIZE 256
428/* [R 1] indication that initialization of the cams by the hardware was done. */
429#define CFC_REG_CAM_INIT_DONE 0x10407c
430/* [RW 2] Interrupt mask register #0 read/write */
431#define CFC_REG_CFC_INT_MASK 0x104108
432/* [R 2] Interrupt register #0 read */
433#define CFC_REG_CFC_INT_STS 0x1040fc
434/* [RC 2] Interrupt register #0 read clear */
435#define CFC_REG_CFC_INT_STS_CLR 0x104100
436/* [RW 4] Parity mask register #0 read/write */
437#define CFC_REG_CFC_PRTY_MASK 0x104118
438/* [R 4] Parity register #0 read */
439#define CFC_REG_CFC_PRTY_STS 0x10410c
440/* [RC 4] Parity register #0 read clear */
441#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
442/* [RW 21] CID cam access (21:1 - Data; valid - 0) */
443#define CFC_REG_CID_CAM 0x104800
444#define CFC_REG_CONTROL0 0x104028
445#define CFC_REG_DEBUG0 0x104050
446/* [RW 14] indicates per error (in #cfc_registers_cfc_error_vector.cfc_error
447 vector) whether the cfc should be disabled upon it */
448#define CFC_REG_DISABLE_ON_ERROR 0x104044
449/* [RC 14] CFC error vector. When the CFC detects an internal error it will
450 set one of these bits. The bit description can be found in the CFC
451 specifications */
452#define CFC_REG_ERROR_VECTOR 0x10403c
453/* [WB 93] LCID info ram access */
454#define CFC_REG_INFO_RAM 0x105000
455#define CFC_REG_INFO_RAM_SIZE 1024
456#define CFC_REG_INIT_REG 0x10404c
457#define CFC_REG_INTERFACES 0x104058
458/* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. this
459 field allows changing the priorities of the weighted-round-robin arbiter
460 which selects which CFC load client should be served next */
461#define CFC_REG_LCREQ_WEIGHTS 0x104084
462/* [RW 16] Link List ram access; data = {prev_lcid; next_lcid} */
463#define CFC_REG_LINK_LIST 0x104c00
464#define CFC_REG_LINK_LIST_SIZE 256
465/* [R 1] indication that initialization of the link list by the hardware was done. */
466#define CFC_REG_LL_INIT_DONE 0x104074
467/* [R 9] Number of allocated LCIDs which are at empty state */
468#define CFC_REG_NUM_LCIDS_ALLOC 0x104020
469/* [R 9] Number of Arriving LCIDs in Link List Block */
470#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
471#define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120
472/* [R 9] Number of Leaving LCIDs in Link List Block */
473#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
474#define CFC_REG_WEAK_ENABLE_PF 0x104124
475/* [RW 8] The event id for aggregated interrupt 0 */
476#define CSDM_REG_AGG_INT_EVENT_0 0xc2038
477#define CSDM_REG_AGG_INT_EVENT_10 0xc2060
478#define CSDM_REG_AGG_INT_EVENT_11 0xc2064
479#define CSDM_REG_AGG_INT_EVENT_12 0xc2068
480#define CSDM_REG_AGG_INT_EVENT_13 0xc206c
481#define CSDM_REG_AGG_INT_EVENT_14 0xc2070
482#define CSDM_REG_AGG_INT_EVENT_15 0xc2074
483#define CSDM_REG_AGG_INT_EVENT_16 0xc2078
484#define CSDM_REG_AGG_INT_EVENT_2 0xc2040
485#define CSDM_REG_AGG_INT_EVENT_3 0xc2044
486#define CSDM_REG_AGG_INT_EVENT_4 0xc2048
487#define CSDM_REG_AGG_INT_EVENT_5 0xc204c
488#define CSDM_REG_AGG_INT_EVENT_6 0xc2050
489#define CSDM_REG_AGG_INT_EVENT_7 0xc2054
490#define CSDM_REG_AGG_INT_EVENT_8 0xc2058
491#define CSDM_REG_AGG_INT_EVENT_9 0xc205c
492/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
493 or auto-mask-mode (1) */
494#define CSDM_REG_AGG_INT_MODE_10 0xc21e0
495#define CSDM_REG_AGG_INT_MODE_11 0xc21e4
496#define CSDM_REG_AGG_INT_MODE_12 0xc21e8
497#define CSDM_REG_AGG_INT_MODE_13 0xc21ec
498#define CSDM_REG_AGG_INT_MODE_14 0xc21f0
499#define CSDM_REG_AGG_INT_MODE_15 0xc21f4
500#define CSDM_REG_AGG_INT_MODE_16 0xc21f8
501#define CSDM_REG_AGG_INT_MODE_6 0xc21d0
502#define CSDM_REG_AGG_INT_MODE_7 0xc21d4
503#define CSDM_REG_AGG_INT_MODE_8 0xc21d8
504#define CSDM_REG_AGG_INT_MODE_9 0xc21dc
505/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
506#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008
507/* [RW 16] The maximum value of the completion counter #0 */
508#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c
509/* [RW 16] The maximum value of the completion counter #1 */
510#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020
511/* [RW 16] The maximum value of the completion counter #2 */
512#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024
513/* [RW 16] The maximum value of the completion counter #3 */
514#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028
515/* [RW 13] The start address in the internal RAM for the completion
516 counters. */
517#define CSDM_REG_CMP_COUNTER_START_ADDR 0xc200c
518/* [RW 32] Interrupt mask register #0 read/write */
519#define CSDM_REG_CSDM_INT_MASK_0 0xc229c
520#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac
521/* [R 32] Interrupt register #0 read */
522#define CSDM_REG_CSDM_INT_STS_0 0xc2290
523#define CSDM_REG_CSDM_INT_STS_1 0xc22a0
524/* [RW 11] Parity mask register #0 read/write */
525#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
526/* [R 11] Parity register #0 read */
527#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
528/* [RC 11] Parity register #0 read clear */
529#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4
530#define CSDM_REG_ENABLE_IN1 0xc2238
531#define CSDM_REG_ENABLE_IN2 0xc223c
532#define CSDM_REG_ENABLE_OUT1 0xc2240
533#define CSDM_REG_ENABLE_OUT2 0xc2244
534/* [RW 4] The initial number of messages that can be sent to the pxp control
535 interface without receiving any ACK. */
536#define CSDM_REG_INIT_CREDIT_PXP_CTRL 0xc24bc
537/* [ST 32] The number of ACK after placement messages received */
538#define CSDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc227c
539/* [ST 32] The number of packet end messages received from the parser */
540#define CSDM_REG_NUM_OF_PKT_END_MSG 0xc2274
541/* [ST 32] The number of requests received from the pxp async if */
542#define CSDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc2278
543/* [ST 32] The number of commands received in queue 0 */
544#define CSDM_REG_NUM_OF_Q0_CMD 0xc2248
545/* [ST 32] The number of commands received in queue 10 */
546#define CSDM_REG_NUM_OF_Q10_CMD 0xc226c
547/* [ST 32] The number of commands received in queue 11 */
548#define CSDM_REG_NUM_OF_Q11_CMD 0xc2270
549/* [ST 32] The number of commands received in queue 1 */
550#define CSDM_REG_NUM_OF_Q1_CMD 0xc224c
551/* [ST 32] The number of commands received in queue 3 */
552#define CSDM_REG_NUM_OF_Q3_CMD 0xc2250
553/* [ST 32] The number of commands received in queue 4 */
554#define CSDM_REG_NUM_OF_Q4_CMD 0xc2254
555/* [ST 32] The number of commands received in queue 5 */
556#define CSDM_REG_NUM_OF_Q5_CMD 0xc2258
557/* [ST 32] The number of commands received in queue 6 */
558#define CSDM_REG_NUM_OF_Q6_CMD 0xc225c
559/* [ST 32] The number of commands received in queue 7 */
560#define CSDM_REG_NUM_OF_Q7_CMD 0xc2260
561/* [ST 32] The number of commands received in queue 8 */
562#define CSDM_REG_NUM_OF_Q8_CMD 0xc2264
563/* [ST 32] The number of commands received in queue 9 */
564#define CSDM_REG_NUM_OF_Q9_CMD 0xc2268
565/* [RW 13] The start address in the internal RAM for queue counters */
566#define CSDM_REG_Q_COUNTER_START_ADDR 0xc2010
567/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
568#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc2548
569/* [R 1] parser fifo empty in sdm_sync block */
570#define CSDM_REG_SYNC_PARSER_EMPTY 0xc2550
571/* [R 1] parser serial fifo empty in sdm_sync block */
572#define CSDM_REG_SYNC_SYNC_EMPTY 0xc2558
573/* [RW 32] Tick for timer counter. Applicable only when
574 ~csdm_registers_timer_tick_enable.timer_tick_enable =1 */
575#define CSDM_REG_TIMER_TICK 0xc2000
576/* [RW 5] The number of time_slots in the arbitration cycle */
577#define CSEM_REG_ARB_CYCLE_SIZE 0x200034
578/* [RW 3] The source that is associated with arbitration element 0. Source
579 decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
580 sleeping thread with priority 1; 4- sleeping thread with priority 2 */
581#define CSEM_REG_ARB_ELEMENT0 0x200020
582/* [RW 3] The source that is associated with arbitration element 1. Source
583 decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
584 sleeping thread with priority 1; 4- sleeping thread with priority 2.
585 Must not be equal to register ~csem_registers_arb_element0.arb_element0 */
586#define CSEM_REG_ARB_ELEMENT1 0x200024
587/* [RW 3] The source that is associated with arbitration element 2. Source
588 decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
589 sleeping thread with priority 1; 4- sleeping thread with priority 2.
590 Must not be equal to register ~csem_registers_arb_element0.arb_element0
591 and ~csem_registers_arb_element1.arb_element1 */
592#define CSEM_REG_ARB_ELEMENT2 0x200028
593/* [RW 3] The source that is associated with arbitration element 3. Source
594 decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
595 sleeping thread with priority 1; 4- sleeping thread with priority 2. Must
596 not be equal to register ~csem_registers_arb_element0.arb_element0 and
597 ~csem_registers_arb_element1.arb_element1 and
598 ~csem_registers_arb_element2.arb_element2 */
599#define CSEM_REG_ARB_ELEMENT3 0x20002c
600/* [RW 3] The source that is associated with arbitration element 4. Source
601 decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
602 sleeping thread with priority 1; 4- sleeping thread with priority 2.
603 Must not be equal to register ~csem_registers_arb_element0.arb_element0
604 and ~csem_registers_arb_element1.arb_element1 and
605 ~csem_registers_arb_element2.arb_element2 and
606 ~csem_registers_arb_element3.arb_element3 */
607#define CSEM_REG_ARB_ELEMENT4 0x200030
608/* [RW 32] Interrupt mask register #0 read/write */
609#define CSEM_REG_CSEM_INT_MASK_0 0x200110
610#define CSEM_REG_CSEM_INT_MASK_1 0x200120
611/* [R 32] Interrupt register #0 read */
612#define CSEM_REG_CSEM_INT_STS_0 0x200104
613#define CSEM_REG_CSEM_INT_STS_1 0x200114
614/* [RW 32] Parity mask register #0 read/write */
615#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130
616#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140
617/* [R 32] Parity register #0 read */
618#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
619#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
620/* [RC 32] Parity register #0 read clear */
621#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128
622#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138
623#define CSEM_REG_ENABLE_IN 0x2000a4
624#define CSEM_REG_ENABLE_OUT 0x2000a8
625/* [RW 32] This address space contains all registers and memories that are
626 placed in SEM_FAST block. The SEM_FAST registers are described in
627 appendix B. In order to access the sem_fast registers the base address
628 ~fast_memory.fast_memory should be added to each sem_fast register offset. */
629#define CSEM_REG_FAST_MEMORY 0x220000
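/* Illustrative: per the comment above, a SEM_FAST register at a given
 * offset is reached through the fast memory window. Hypothetical macro: */
#define CSEM_FAST_MEMORY_ADDR(sem_fast_offset) \
	(CSEM_REG_FAST_MEMORY + (sem_fast_offset))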
630/* [RW 1] Disables input messages from FIC0. May be updated during run_time
631 by the microcode */
632#define CSEM_REG_FIC0_DISABLE 0x200224
633/* [RW 1] Disables input messages from FIC1. May be updated during run_time
634 by the microcode */
635#define CSEM_REG_FIC1_DISABLE 0x200234
636/* [RW 15] Interrupt table. Read and write access to it is not possible in
637 the middle of the work */
638#define CSEM_REG_INT_TABLE 0x200400
639/* [ST 24] Statistics register. The number of messages that entered through
640 FIC0 */
641#define CSEM_REG_MSG_NUM_FIC0 0x200000
642/* [ST 24] Statistics register. The number of messages that entered through
643 FIC1 */
644#define CSEM_REG_MSG_NUM_FIC1 0x200004
645/* [ST 24] Statistics register. The number of messages that were sent to
646 FOC0 */
647#define CSEM_REG_MSG_NUM_FOC0 0x200008
648/* [ST 24] Statistics register. The number of messages that were sent to
649 FOC1 */
650#define CSEM_REG_MSG_NUM_FOC1 0x20000c
651/* [ST 24] Statistics register. The number of messages that were sent to
652 FOC2 */
653#define CSEM_REG_MSG_NUM_FOC2 0x200010
654/* [ST 24] Statistics register. The number of messages that were sent to
655 FOC3 */
656#define CSEM_REG_MSG_NUM_FOC3 0x200014
657/* [RW 1] Disables input messages from the passive buffer. May be updated
658 during run_time by the microcode */
659#define CSEM_REG_PAS_DISABLE 0x20024c
660/* [WB 128] Debug only. Passive buffer memory */
661#define CSEM_REG_PASSIVE_BUFFER 0x202000
662/* [WB 46] pram memory. Bit 45 is parity; bits [44:0] - data. */
663#define CSEM_REG_PRAM 0x240000
664/* [R 16] Valid sleeping threads indication; one bit per thread */
665#define CSEM_REG_SLEEP_THREADS_VALID 0x20026c
666/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
667#define CSEM_REG_SLOW_EXT_STORE_EMPTY 0x2002a0
668/* [RW 16] List of free threads. There is a bit per thread. */
669#define CSEM_REG_THREADS_LIST 0x2002e4
670/* [RW 3] The arbitration scheme of time_slot 0 */
671#define CSEM_REG_TS_0_AS 0x200038
672/* [RW 3] The arbitration scheme of time_slot 10 */
673#define CSEM_REG_TS_10_AS 0x200060
674/* [RW 3] The arbitration scheme of time_slot 11 */
675#define CSEM_REG_TS_11_AS 0x200064
676/* [RW 3] The arbitration scheme of time_slot 12 */
677#define CSEM_REG_TS_12_AS 0x200068
678/* [RW 3] The arbitration scheme of time_slot 13 */
679#define CSEM_REG_TS_13_AS 0x20006c
680/* [RW 3] The arbitration scheme of time_slot 14 */
681#define CSEM_REG_TS_14_AS 0x200070
682/* [RW 3] The arbitration scheme of time_slot 15 */
683#define CSEM_REG_TS_15_AS 0x200074
684/* [RW 3] The arbitration scheme of time_slot 16 */
685#define CSEM_REG_TS_16_AS 0x200078
686/* [RW 3] The arbitration scheme of time_slot 17 */
687#define CSEM_REG_TS_17_AS 0x20007c
688/* [RW 3] The arbitration scheme of time_slot 18 */
689#define CSEM_REG_TS_18_AS 0x200080
690/* [RW 3] The arbitration scheme of time_slot 1 */
691#define CSEM_REG_TS_1_AS 0x20003c
692/* [RW 3] The arbitration scheme of time_slot 2 */
693#define CSEM_REG_TS_2_AS 0x200040
694/* [RW 3] The arbitration scheme of time_slot 3 */
695#define CSEM_REG_TS_3_AS 0x200044
696/* [RW 3] The arbitration scheme of time_slot 4 */
697#define CSEM_REG_TS_4_AS 0x200048
698/* [RW 3] The arbitration scheme of time_slot 5 */
699#define CSEM_REG_TS_5_AS 0x20004c
700/* [RW 3] The arbitration scheme of time_slot 6 */
701#define CSEM_REG_TS_6_AS 0x200050
702/* [RW 3] The arbitration scheme of time_slot 7 */
703#define CSEM_REG_TS_7_AS 0x200054
704/* [RW 3] The arbitration scheme of time_slot 8 */
705#define CSEM_REG_TS_8_AS 0x200058
706/* [RW 3] The arbitration scheme of time_slot 9 */
707#define CSEM_REG_TS_9_AS 0x20005c
708/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit
709 * for the 64 VFs; values 64-67 for the 4 PFs; values 68-127 are not valid. */
710#define CSEM_REG_VFPF_ERR_NUM 0x200380
711/* [RW 1] Parity mask register #0 read/write */
712#define DBG_REG_DBG_PRTY_MASK 0xc0a8
713/* [R 1] Parity register #0 read */
714#define DBG_REG_DBG_PRTY_STS 0xc09c
715/* [RC 1] Parity register #0 read clear */
716#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
717/* [RW 1] When set the DMAE will process the commands as in E1.5. 1. The
718 * function that is used is always SRC-PCI; 2. VF_Valid = 0; 3. VFID = 0;
719 * 4. Completion function = 0; 5. Error handling = 0 */
720#define DMAE_REG_BACKWARD_COMP_EN 0x10207c
721/* [RW 32] Commands memory. The address of command X; row Y is calculated
722 as 14*X+Y. */
723#define DMAE_REG_CMD_MEM 0x102400
724#define DMAE_REG_CMD_MEM_SIZE 224
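/* Illustrative: per the comment above, row Y of command X sits at 32-bit
 * word index 14*X + Y; assuming 4-byte rows, the GRC byte address follows.
 * Hypothetical macro name: */
#define DMAE_CMD_MEM_ADDR(x, y)	(DMAE_REG_CMD_MEM + ((14 * (x) + (y)) << 2))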
725/* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c
726 initial value is all ones. */
727#define DMAE_REG_CRC16C_INIT 0x10201c
728/* [RW 1] If 0 - the CRC-16 T10 initial value is all zeroes; if 1 - the
729 CRC-16 T10 initial value is all ones. */
730#define DMAE_REG_CRC16T10_INIT 0x102020
731/* [RW 2] Interrupt mask register #0 read/write */
732#define DMAE_REG_DMAE_INT_MASK 0x102054
733/* [RW 4] Parity mask register #0 read/write */
734#define DMAE_REG_DMAE_PRTY_MASK 0x102064
735/* [R 4] Parity register #0 read */
736#define DMAE_REG_DMAE_PRTY_STS 0x102058
737/* [RC 4] Parity register #0 read clear */
738#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
739/* [RW 1] Command 0 go. */
740#define DMAE_REG_GO_C0 0x102080
741/* [RW 1] Command 1 go. */
742#define DMAE_REG_GO_C1 0x102084
743/* [RW 1] Command 10 go. */
744#define DMAE_REG_GO_C10 0x102088
745/* [RW 1] Command 11 go. */
746#define DMAE_REG_GO_C11 0x10208c
747/* [RW 1] Command 12 go. */
748#define DMAE_REG_GO_C12 0x102090
749/* [RW 1] Command 13 go. */
750#define DMAE_REG_GO_C13 0x102094
751/* [RW 1] Command 14 go. */
752#define DMAE_REG_GO_C14 0x102098
753/* [RW 1] Command 15 go. */
754#define DMAE_REG_GO_C15 0x10209c
755/* [RW 1] Command 2 go. */
756#define DMAE_REG_GO_C2 0x1020a0
757/* [RW 1] Command 3 go. */
758#define DMAE_REG_GO_C3 0x1020a4
759/* [RW 1] Command 4 go. */
760#define DMAE_REG_GO_C4 0x1020a8
761/* [RW 1] Command 5 go. */
762#define DMAE_REG_GO_C5 0x1020ac
763/* [RW 1] Command 6 go. */
764#define DMAE_REG_GO_C6 0x1020b0
765/* [RW 1] Command 7 go. */
766#define DMAE_REG_GO_C7 0x1020b4
767/* [RW 1] Command 8 go. */
768#define DMAE_REG_GO_C8 0x1020b8
769/* [RW 1] Command 9 go. */
770#define DMAE_REG_GO_C9 0x1020bc
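/* Illustrative sketch: a posted DMAE command is started by writing 1 to its
 * GO register. The GO offsets are not laid out in command order (C10/C11
 * sit between C1 and C2), so indexing goes through a lookup table, along
 * the lines of the dmae_reg_go_c[] table kept in bnx2x_main.c: */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0,  DMAE_REG_GO_C1,  DMAE_REG_GO_C2,  DMAE_REG_GO_C3,
	DMAE_REG_GO_C4,  DMAE_REG_GO_C5,  DMAE_REG_GO_C6,  DMAE_REG_GO_C7,
	DMAE_REG_GO_C8,  DMAE_REG_GO_C9,  DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* usage (assuming REG_WR from bnx2x.h): REG_WR(bp, dmae_reg_go_c[idx], 1); */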
771/* [RW 1] DMAE GRC Interface (Target; Master) enable. If 0 - the acknowledge
772 input is disregarded; valid is deasserted; all other signals are treated
773 as usual; if 1 - normal activity. */
774#define DMAE_REG_GRC_IFEN 0x102008
775/* [RW 1] DMAE PCI Interface (Request; Read; Write) enable. If 0 - the
776 acknowledge input is disregarded; valid is deasserted; full is asserted;
777 all other signals are treated as usual; if 1 - normal activity. */
778#define DMAE_REG_PCI_IFEN 0x102004
779/* [RW 4] DMAE- PCI Request Interface initial credit. Write writes the
780 initial value to the credit counter; related to the address. Read returns
781 the current value of the counter. */
782#define DMAE_REG_PXP_REQ_INIT_CRD 0x1020c0
783/* [RW 8] Aggregation command. */
784#define DORQ_REG_AGG_CMD0 0x170060
785/* [RW 8] Aggregation command. */
786#define DORQ_REG_AGG_CMD1 0x170064
787/* [RW 8] Aggregation command. */
788#define DORQ_REG_AGG_CMD2 0x170068
789/* [RW 8] Aggregation command. */
790#define DORQ_REG_AGG_CMD3 0x17006c
791/* [RW 28] UCM Header. */
792#define DORQ_REG_CMHEAD_RX 0x170050
793/* [RW 32] Doorbell address for RBC doorbells (function 0). */
794#define DORQ_REG_DB_ADDR0 0x17008c
795/* [RW 5] Interrupt mask register #0 read/write */
796#define DORQ_REG_DORQ_INT_MASK 0x170180
797/* [R 5] Interrupt register #0 read */
798#define DORQ_REG_DORQ_INT_STS 0x170174
799/* [RC 5] Interrupt register #0 read clear */
800#define DORQ_REG_DORQ_INT_STS_CLR 0x170178
801/* [RW 2] Parity mask register #0 read/write */
802#define DORQ_REG_DORQ_PRTY_MASK 0x170190
803/* [R 2] Parity register #0 read */
804#define DORQ_REG_DORQ_PRTY_STS 0x170184
805/* [RC 2] Parity register #0 read clear */
806#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
807/* [RW 8] The address to write the DPM CID to STORM. */
808#define DORQ_REG_DPM_CID_ADDR 0x170044
809/* [RW 5] The DPM mode CID extraction offset. */
810#define DORQ_REG_DPM_CID_OFST 0x170030
811/* [RW 12] The threshold of the DQ FIFO to send the almost full interrupt. */
812#define DORQ_REG_DQ_FIFO_AFULL_TH 0x17007c
813/* [RW 12] The threshold of the DQ FIFO to send the full interrupt. */
814#define DORQ_REG_DQ_FIFO_FULL_TH 0x170078
815/* [R 13] Current value of the DQ FIFO fill level according to the following
816 pointer. The range is 0 - 256 FIFO rows; where each row stands for one
817 doorbell. */
818#define DORQ_REG_DQ_FILL_LVLF 0x1700a4
819/* [R 1] DQ FIFO full status. Set when the FIFO fill level is greater than
820 or equal to the full threshold; reset on full clear. */
821#define DORQ_REG_DQ_FULL_ST 0x1700c0
822/* [RW 28] The value sent to CM header in the case of CFC load error. */
823#define DORQ_REG_ERR_CMHEAD 0x170058
824#define DORQ_REG_IF_EN 0x170004
825#define DORQ_REG_MODE_ACT 0x170008
826/* [RW 5] The normal mode CID extraction offset. */
827#define DORQ_REG_NORM_CID_OFST 0x17002c
828/* [RW 28] TCM Header when only TCP context is loaded. */
829#define DORQ_REG_NORM_CMHEAD_TX 0x17004c
830/* [RW 3] The number of simultaneous outstanding requests to Context Fetch
831 Interface. */
832#define DORQ_REG_OUTST_REQ 0x17003c
833#define DORQ_REG_PF_USAGE_CNT 0x1701d0
834#define DORQ_REG_REGN 0x170038
835/* [R 4] Current value of response A counter credit. Initial credit is
836 configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
837 register. */
838#define DORQ_REG_RSPA_CRD_CNT 0x1700ac
839/* [R 4] Current value of response B counter credit. Initial credit is
840 configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
841 register. */
842#define DORQ_REG_RSPB_CRD_CNT 0x1700b0
843/* [RW 4] The initial credit at the Doorbell Response Interface. The write
844 writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The
845 read reads this written value. */
846#define DORQ_REG_RSP_INIT_CRD 0x170048
847/* [RW 4] Initial activity counter value on the load request; when the
848 shortcut is done. */
849#define DORQ_REG_SHRT_ACT_CNT 0x170070
850/* [RW 28] TCM Header when both ULP and TCP context is loaded. */
851#define DORQ_REG_SHRT_CMHEAD 0x170054
852#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4)
853#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 (0x1<<0)
854#define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3)
855#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7)
856#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2)
857#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
858#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0)
859#define HC_REG_AGG_INT_0 0x108050
860#define HC_REG_AGG_INT_1 0x108054
861#define HC_REG_ATTN_BIT 0x108120
862#define HC_REG_ATTN_IDX 0x108100
863#define HC_REG_ATTN_MSG0_ADDR_L 0x108018
864#define HC_REG_ATTN_MSG1_ADDR_L 0x108020
865#define HC_REG_ATTN_NUM_P0 0x108038
866#define HC_REG_ATTN_NUM_P1 0x10803c
867#define HC_REG_COMMAND_REG 0x108180
868#define HC_REG_CONFIG_0 0x108000
869#define HC_REG_CONFIG_1 0x108004
870#define HC_REG_FUNC_NUM_P0 0x1080ac
871#define HC_REG_FUNC_NUM_P1 0x1080b0
872/* [RW 3] Parity mask register #0 read/write */
873#define HC_REG_HC_PRTY_MASK 0x1080a0
874/* [R 3] Parity register #0 read */
875#define HC_REG_HC_PRTY_STS 0x108094
876/* [RC 3] Parity register #0 read clear */
877#define HC_REG_HC_PRTY_STS_CLR 0x108098
878#define HC_REG_INT_MASK 0x108108
879#define HC_REG_LEADING_EDGE_0 0x108040
880#define HC_REG_LEADING_EDGE_1 0x108048
881#define HC_REG_MAIN_MEMORY 0x108800
882#define HC_REG_MAIN_MEMORY_SIZE 152
883#define HC_REG_P0_PROD_CONS 0x108200
884#define HC_REG_P1_PROD_CONS 0x108400
885#define HC_REG_PBA_COMMAND 0x108140
886#define HC_REG_PCI_CONFIG_0 0x108010
887#define HC_REG_PCI_CONFIG_1 0x108014
888#define HC_REG_STATISTIC_COUNTERS 0x109000
889#define HC_REG_TRAILING_EDGE_0 0x108044
890#define HC_REG_TRAILING_EDGE_1 0x10804c
891#define HC_REG_UC_RAM_ADDR_0 0x108028
892#define HC_REG_UC_RAM_ADDR_1 0x108030
893#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068
894#define HC_REG_VQID_0 0x108008
895#define HC_REG_VQID_1 0x10800c
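/* Illustrative sketch: the HC_CONFIG_0 bits above are applied by a
 * read-modify-write of HC_REG_CONFIG_0, in the spirit of the driver's
 * interrupt-enable path. REG_RD()/REG_WR() are assumed from bnx2x.h. */
static inline void hc_enable_msix(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, HC_REG_CONFIG_0);

	val |= HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
	       HC_CONFIG_0_REG_ATTN_BIT_EN_0;
	val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;	/* no INTA# once MSI-X is on */
	REG_WR(bp, HC_REG_CONFIG_0, val);
}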
896#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1)
897#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE (0x1<<0)
898#define IGU_REG_ATTENTION_ACK_BITS 0x130108
899/* [R 4] Debug: attn_fsm */
900#define IGU_REG_ATTN_FSM 0x130054
901#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c
902#define IGU_REG_ATTN_MSG_ADDR_L 0x130120
903/* [R 4] Debug: [3] - attention write done message is pending (0-no pending;
904 * 1-pending). [2:0] = PFID. Pending means the attention message was sent;
905 * but write done was not yet received. */
906#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030
907#define IGU_REG_BLOCK_CONFIGURATION 0x130000
908#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124
909#define IGU_REG_COMMAND_REG_CTRL 0x13012c
910/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit
911 * is clear. The bits in this register are set and cleared via the producer
912 * command. Data is valid only in addresses 0-4; all the rest are zero. */
913#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200
914/* [R 5] Debug: ctrl_fsm */
915#define IGU_REG_CTRL_FSM 0x130064
916/* [R 1] data available for error memory. If this bit is clear do not read
917 * from error_handling_memory. */
918#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
919/* [RW 11] Parity mask register #0 read/write */
920#define IGU_REG_IGU_PRTY_MASK 0x1300a8
921/* [R 11] Parity register #0 read */
922#define IGU_REG_IGU_PRTY_STS 0x13009c
923/* [RC 11] Parity register #0 read clear */
924#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
925/* [R 4] Debug: int_handle_fsm */
926#define IGU_REG_INT_HANDLE_FSM 0x130050
927#define IGU_REG_LEADING_EDGE_LATCH 0x130134
928/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid.
929 * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF
930 * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */
931#define IGU_REG_MAPPING_MEMORY 0x131000
932#define IGU_REG_MAPPING_MEMORY_SIZE 136
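/* Illustrative macro: composing an E2 mapping-CAM entry per the layout
 * documented above ([0] valid; [6:1] vector; [13:7] FID). The name is
 * hypothetical, not part of the HSI: */
#define IGU_CAM_ENTRY(valid, vector, fid) \
	(((valid) & 0x1) | (((vector) & 0x3f) << 1) | (((fid) & 0x7f) << 7))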
933#define IGU_REG_PBA_STATUS_LSB 0x130138
934#define IGU_REG_PBA_STATUS_MSB 0x13013c
935#define IGU_REG_PCI_PF_MSI_EN 0x130140
936#define IGU_REG_PCI_PF_MSIX_EN 0x130144
937#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148
938/* [WB_R 32] Each bit represents the pending status for that SB. 0 = no
939 * pending; 1 = pending. Pending means the interrupt was asserted and write
940 * done was not received. Data is valid only in addresses 0-4; all the rest
941 * are zero. */
942#define IGU_REG_PENDING_BITS_STATUS 0x130300
943#define IGU_REG_PF_CONFIGURATION 0x130154
944/* [RW 20] producers only. E2 mode: address 0-135 match to the mapping
945 * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default
946 * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod;
947 * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode
948 * - In backward compatible mode; for non default SB; each even line in the
949 * memory holds the U producer and each odd line holds the C producer. The
950 * first 128 producers are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The
951 * last 20 producers are for the DSB for each PF. Each PF has five segments
952 * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
953 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */
954#define IGU_REG_PROD_CONS_MEMORY 0x132000
955/* [R 3] Debug: pxp_arb_fsm */
956#define IGU_REG_PXP_ARB_FSM 0x130068
957/* [RW 6] Writing one to a bit will reset the appropriate memory. When the
958 * memory reset has finished the appropriate bit will be cleared. Bit 0 - mapping
959 * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3
960 * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */
961#define IGU_REG_RESET_MEMORIES 0x130158
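/* Illustrative sketch: per the comment above, memories are reset by setting
 * the corresponding bits and waiting for the hardware to clear them. A
 * minimal poll loop, assuming REG_RD()/REG_WR() from bnx2x.h, udelay() from
 * <linux/delay.h>, and a made-up timeout policy: */
static inline int igu_reset_memories(struct bnx2x *bp, u32 mem_bits)
{
	int tout = 1000;

	REG_WR(bp, IGU_REG_RESET_MEMORIES, mem_bits);
	while ((REG_RD(bp, IGU_REG_RESET_MEMORIES) & mem_bits) && --tout)
		udelay(5);
	return tout ? 0 : -EBUSY;
}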
962/* [R 4] Debug: sb_ctrl_fsm */
963#define IGU_REG_SB_CTRL_FSM 0x13004c
964#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c
965#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160
966#define IGU_REG_SB_MASK_LSB 0x130164
967#define IGU_REG_SB_MASK_MSB 0x130168
968/* [RW 16] Number of commands that were dropped without causing an interrupt
969 * due to: read access for WO BAR address; or write access for RO BAR
970 * address or any access for reserved address or PCI function error is set
971 * and address is not MSIX; PBA or cleanup */
972#define IGU_REG_SILENT_DROP 0x13016c
973/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 -
974 * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per
975 * PF; 68-71 number of ATTN messages per PF */
976#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800
977/* [RW 32] Number of cycles for which the timer mask masks the IGU interrupt
978 * after a timer mask command arrives. Value must be greater than 100. */
979#define IGU_REG_TIMER_MASKING_VALUE 0x13003c
980#define IGU_REG_TRAILING_EDGE_LATCH 0x130104
981#define IGU_REG_VF_CONFIGURATION 0x130170
982/* [WB_R 32] Each bit represents the write done pending status for that SB
983 * (MSI/MSIX message was sent and write done was not received yet). 0 =
984 * clear; 1 = set. Data is valid only in addresses 0-4; all the rest are zero. */
985#define IGU_REG_WRITE_DONE_PENDING 0x130480
986#define MCP_A_REG_MCPR_SCRATCH 0x3a0000
987#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c
988#define MCP_REG_MCPR_GP_INPUTS 0x800c0
989#define MCP_REG_MCPR_GP_OENABLE 0x800c8
990#define MCP_REG_MCPR_GP_OUTPUTS 0x800c4
991#define MCP_REG_MCPR_IMC_COMMAND 0x85900
992#define MCP_REG_MCPR_IMC_DATAREG0 0x85920
993#define MCP_REG_MCPR_IMC_SLAVE_CONTROL 0x85904
995#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424
996#define MCP_REG_MCPR_NVM_ADDR 0x8640c
997#define MCP_REG_MCPR_NVM_CFG4 0x8642c
998#define MCP_REG_MCPR_NVM_COMMAND 0x86400
999#define MCP_REG_MCPR_NVM_READ 0x86410
1000#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
1001#define MCP_REG_MCPR_NVM_WRITE 0x86408
1002#define MCP_REG_MCPR_SCRATCH 0xa0000
1003#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
1004#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
1005/* [R 32] read first 32 bit after inversion of function 0. mapped as
1006 follows: [0] NIG attention for function0; [1] NIG attention for
1007 function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
1008 [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9]
1009 GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE
1010 glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0;
1011 [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16]
1012 MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB
1013 Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw
1014 interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity
1015 error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw
1016 interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] PBF
1017 Parity error; [31] PBF Hw interrupt; */
1018#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 0xa42c
1019#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1 0xa430
1020/* [R 32] read first 32 bit after inversion of mcp. mapped as follows: [0]
1021 NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
1022 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
1023 [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
1024 PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
1025 function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
1026 Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
1027 mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
1028 BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
1029 Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
1030 interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
1031 Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
1032 interrupt; */
1033#define MISC_REG_AEU_AFTER_INVERT_1_MCP 0xa434
1034/* [R 32] read second 32 bit after inversion of function 0. mapped as
1035 follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
1036 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
1037 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
1038 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
1039 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
1040 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
1041 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
1042 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
1043 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
1044 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
1045 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
1046 interrupt; */
1047#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 0xa438
1048#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1 0xa43c
1049/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0]
1050 PBClient Parity error; [1] PBClient Hw interrupt; [2] QM Parity error;
1051 [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt;
1052 [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9]
1053 XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
1054 DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
1055 error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
1056 PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
1057 [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
1058 [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
1059 [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
1060 [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
1061#define MISC_REG_AEU_AFTER_INVERT_2_MCP 0xa440
1062/* [R 32] read third 32 bit after inversion of function 0. mapped as
1063 follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity
1064 error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5]
1065 PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
1066 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
1067 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
1068 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
1069 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
1070 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
1071 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
1072 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
1073 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
1074 attn1; */
1075#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 0xa444
1076#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1 0xa448
1077/* [R 32] read third 32 bit after inversion of mcp. mapped as follows: [0]
1078 CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP
1079 Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient
1080 Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity
1081 error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw
1082 interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14]
1083 MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17]
1084 Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW
1085 timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3
1086 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1
1087 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW
1088 timers attn_4 func1; [30] General attn0; [31] General attn1; */
1089#define MISC_REG_AEU_AFTER_INVERT_3_MCP 0xa44c
1090/* [R 32] read fourth 32 bit after inversion of function 0. mapped as
1091 follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
1092 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
1093 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
1094 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
1095 [14] General attn16; [15] General attn17; [16] General attn18; [17]
1096 General attn19; [18] General attn20; [19] General attn21; [20] Main power
1097 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
1098 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
1099 Latched timeout attention; [27] GRC Latched reserved access attention;
1100 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
1101 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1102#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 0xa450
1103#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1 0xa454
1104/* [R 32] read fourth 32 bit after inversion of mcp. mapped as follows: [0]
1105 General attn2; [1] General attn3; [2] General attn4; [3] General attn5;
1106 [4] General attn6; [5] General attn7; [6] General attn8; [7] General
1107 attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11]
1108 General attn13; [12] General attn14; [13] General attn15; [14] General
1109 attn16; [15] General attn17; [16] General attn18; [17] General attn19;
1110 [18] General attn20; [19] General attn21; [20] Main power interrupt; [21]
1111 RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24]
1112 RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout
1113 attention; [27] GRC Latched reserved access attention; [28] MCP Latched
1114 rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
1115 ump_tx_parity; [31] MCP Latched scpad_parity; */
1116#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458
1117/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as
1118 * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
1119 * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
1120 * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */
1121#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700
1122/* [W 14] a write to this register results in clearing the latched
1123 signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in
1124 d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP
1125 latch; one in d5 clears GRC Latched timeout attention; one in d6 clears
1126 GRC Latched reserved access attention; one in d7 clears Latched
1127 rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears
1128 Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both
1129 ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears
1130 pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; a read
1131 from this register returns zero */
1132#define MISC_REG_AEU_CLR_LATCH_SIGNAL 0xa45c
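/* Illustrative usage note: per the bit map above, writing a one-hot value
 * clears the matching latched signal; e.g. clearing the latched rom_parity
 * (d7) would be (assuming REG_WR from bnx2x.h):
 *	REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 1 << 7);
 */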
1133/* [RW 32] first 32b for enabling the output for function 0 output0. mapped
1134 as follows: [0] NIG attention for function0; [1] NIG attention for
1135 function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
1136 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
1137 GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
1138 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
1139 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
1140 SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
1141 indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
1142 [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
1143 SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
1144 TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
1145 TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
1146#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 0xa06c
1147#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 0xa07c
1148#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 0xa08c
1149#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3 0xa09c
1150#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5 0xa0bc
1151#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6 0xa0cc
1152#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7 0xa0dc
1153/* [RW 32] first 32b for enabling the output for function 1 output0. mapped
1154 as follows: [0] NIG attention for function0; [1] NIG attention for
1155 function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function
1156 1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
1157 GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
1158 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
1159 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
1160 SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X
1161 indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
1162 [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
1163 SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
1164 TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
1165 TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
1166#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 0xa10c
1167#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 0xa11c
1168#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 0xa12c
1169#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3 0xa13c
1170#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5 0xa15c
1171#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6 0xa16c
1172#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7 0xa17c
1173/* [RW 32] first 32b for enabling the output for close the gate nig. mapped
1174 as follows: [0] NIG attention for function0; [1] NIG attention for
1175 function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
1176 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
1177 GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
1178 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
1179 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
1180 SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
1181 indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
1182 [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
1183 SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
1184 TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
1185 TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
1186#define MISC_REG_AEU_ENABLE1_NIG_0 0xa0ec
1187#define MISC_REG_AEU_ENABLE1_NIG_1 0xa18c
1188/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped
1189 as follows: [0] NIG attention for function0; [1] NIG attention for
1190 function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
1191 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
1192 GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
1193 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
1194 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
1195 SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
1196 indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
1197 [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
1198 SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
1199 TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
1200 TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
1201#define MISC_REG_AEU_ENABLE1_PXP_0 0xa0fc
1202#define MISC_REG_AEU_ENABLE1_PXP_1 0xa19c
1203/* [RW 32] second 32b for enabling the output for function 0 output0. mapped
1204 as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
1205 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
1206 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
1207 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
1208 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
1209 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
1210 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
1211 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
1212 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
1213 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
1214 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
1215 interrupt; */
1216#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0 0xa070
1217#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1 0xa080
1218/* [RW 32] second 32b for enabling the output for function 1 output0. mapped
1219 as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
1220 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
1221 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
1222 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
1223 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
1224 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
1225 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
1226 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
1227 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
1228 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
1229 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
1230 interrupt; */
1231#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0 0xa110
1232#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1 0xa120
1233/* [RW 32] second 32b for enabling the output for closing the gate nig. mapped
1234 as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
1235 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
1236 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
1237 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
1238 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
1239 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
1240 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
1241 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
1242 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
1243 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
1244 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
1245 interrupt; */
1246#define MISC_REG_AEU_ENABLE2_NIG_0 0xa0f0
1247#define MISC_REG_AEU_ENABLE2_NIG_1 0xa190
1248/* [RW 32] second 32b for enabling the output for closing the gate pxp. mapped
1249 as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
1250 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
1251 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
1252 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
1253 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
1254 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
1255 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
1256 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
1257 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
1258 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
1259 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
1260 interrupt; */
1261#define MISC_REG_AEU_ENABLE2_PXP_0 0xa100
1262#define MISC_REG_AEU_ENABLE2_PXP_1 0xa1a0
1263/* [RW 32] third 32b for enabling the output for function 0 output0. mapped
1264 as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
1265 Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
1266 [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
1267 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
1268 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
1269 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
1270 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
1271 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
1272 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
1273 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
1274 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
1275 attn1; */
1276#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0 0xa074
1277#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1 0xa084
1278/* [RW 32] third 32b for enabling the output for function 1 output0. mapped
1279 as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
1280 Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
1281 [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
1282 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
1283 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
1284 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
1285 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
1286 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
1287 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
1288 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
1289 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
1290 attn1; */
1291#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0 0xa114
1292#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1 0xa124
1293/* [RW 32] third 32b for enabling the output for closing the gate nig. mapped
1294 as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
1295 Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
1296 [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
1297 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
1298 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
1299 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
1300 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
1301 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
1302 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
1303 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
1304 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
1305 attn1; */
1306#define MISC_REG_AEU_ENABLE3_NIG_0 0xa0f4
1307#define MISC_REG_AEU_ENABLE3_NIG_1 0xa194
1308/* [RW 32] third 32b for enabling the output for closing the gate pxp. mapped
1309 as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
1310 Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
1311 [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
1312 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
1313 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
1314 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
1315 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
1316 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
1317 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
1318 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
1319 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
1320 attn1; */
1321#define MISC_REG_AEU_ENABLE3_PXP_0 0xa104
1322#define MISC_REG_AEU_ENABLE3_PXP_1 0xa1a4
1323/* [RW 32] fourth 32b for enabling the output for function 0 output0. mapped
1324 as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
1325 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
1326 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
1327 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
1328 [14] General attn16; [15] General attn17; [16] General attn18; [17]
1329 General attn19; [18] General attn20; [19] General attn21; [20] Main power
1330 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
1331 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
1332 Latched timeout attention; [27] GRC Latched reserved access attention;
1333 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
1334 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1335#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 0xa078
1336#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2 0xa098
1337#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4 0xa0b8
1338#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5 0xa0c8
1339#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6 0xa0d8
1340#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7 0xa0e8
1341/* [RW 32] fourth 32b for enabling the output for function 1 output0. mapped
1342 as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
1343 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
1344 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
1345 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
1346 [14] General attn16; [15] General attn17; [16] General attn18; [17]
1347 General attn19; [18] General attn20; [19] General attn21; [20] Main power
1348 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
1349 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
1350 Latched timeout attention; [27] GRC Latched reserved access attention;
1351 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
1352 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1353#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 0xa118
1354#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2 0xa138
1355#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4 0xa158
1356#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5 0xa168
1357#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6 0xa178
1358#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7 0xa188
1359/* [RW 32] fourth 32b for enabling the output for closing the gate nig. mapped
1360 as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
1361 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
1362 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
1363 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
1364 [14] General attn16; [15] General attn17; [16] General attn18; [17]
1365 General attn19; [18] General attn20; [19] General attn21; [20] Main power
1366 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
1367 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
1368 Latched timeout attention; [27] GRC Latched reserved access attention;
1369 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
1370 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1371#define MISC_REG_AEU_ENABLE4_NIG_0 0xa0f8
1372#define MISC_REG_AEU_ENABLE4_NIG_1 0xa198
1373/* [RW 32] fourth 32b for enabling the output for closing the gate pxp. mapped
1374 as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
1375 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
1376 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
1377 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
1378 [14] General attn16; [15] General attn17; [16] General attn18; [17]
1379 General attn19; [18] General attn20; [19] General attn21; [20] Main power
1380 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
1381 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
1382 Latched timeout attention; [27] GRC Latched reserved access attention;
1383 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
1384 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1385#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
1386#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
1387/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped
1388 * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
1389 * attention; [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
1390 * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
1391 * parity; [31-10] Reserved; */
1392#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688
1393/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped
1394 * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
1395 * attention; [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
1396 * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
1397 * parity; [31-10] Reserved; */
1398#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0
1399/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
1400 128 bit vector */
1401#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000
1402#define MISC_REG_AEU_GENERAL_ATTN_1 0xa004
1403#define MISC_REG_AEU_GENERAL_ATTN_10 0xa028
1404#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c
1405#define MISC_REG_AEU_GENERAL_ATTN_12 0xa030
1406#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008
1407#define MISC_REG_AEU_GENERAL_ATTN_3 0xa00c
1408#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010
1409#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014
1410#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018
1411#define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c
1412#define MISC_REG_AEU_GENERAL_ATTN_8 0xa020
1413#define MISC_REG_AEU_GENERAL_ATTN_9 0xa024
1414#define MISC_REG_AEU_GENERAL_MASK 0xa61c
1415/* [RW 32] first 32b for inverting the input for function 0; for each bit:
1416 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for
1417 function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp;
1418 [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; [7] GPIO2 function 1;
1419 [8] GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
1420 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
1421 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
1422 SPIO4; [15] SPIO5; [16] MSI/X indication for mcp; [17] MSI/X indication
1423 for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; [20] PRS
1424 Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] SRC Hw
1425 interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] TCM
1426 Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] TSEMI
1427 Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
1428#define MISC_REG_AEU_INVERTER_1_FUNC_0 0xa22c
1429#define MISC_REG_AEU_INVERTER_1_FUNC_1 0xa23c
1430/* [RW 32] second 32b for inverting the input for function 0; for each bit:
1431 0= do not invert; 1= invert. mapped as follows: [0] PBClient Parity
1432 error; [1] PBClient Hw interrupt; [2] QM Parity error; [3] QM Hw
1433 interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM
1434 Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw
1435 interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
1436 DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
1437 error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
1438 PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
1439 [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
1440 [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
1441 [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
1442 [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
1443#define MISC_REG_AEU_INVERTER_2_FUNC_0 0xa230
1444#define MISC_REG_AEU_INVERTER_2_FUNC_1 0xa240
1445/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0;
1446 [9:8] = reserved. Zero = mask; one = unmask */
1447#define MISC_REG_AEU_MASK_ATTN_FUNC_0 0xa060
1448#define MISC_REG_AEU_MASK_ATTN_FUNC_1 0xa064
1449/* [RW 1] If set a system kill occurred */
1450#define MISC_REG_AEU_SYS_KILL_OCCURRED 0xa610
1451/* [RW 32] Represent the status of the input vector to the AEU when a system
1452 kill occurred. The register is reset in por reset. Mapped as follows: [0]
1453 NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
1454 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
1455 [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
1456 PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
1457 function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
1458 Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
1459 mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
1460 BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
1461 Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
1462 interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
1463 Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
1464 interrupt; */
1465#define MISC_REG_AEU_SYS_KILL_STATUS_0 0xa600
1466#define MISC_REG_AEU_SYS_KILL_STATUS_1 0xa604
1467#define MISC_REG_AEU_SYS_KILL_STATUS_2 0xa608
1468#define MISC_REG_AEU_SYS_KILL_STATUS_3 0xa60c
1469/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
1470 Port. */
1471#define MISC_REG_BOND_ID 0xa400
1472/* [R 8] These bits indicate the metal revision of the chip. This value
1473 starts at 0x00 for each all-layer tape-out and increments by one for each
1474 tape-out. */
1475#define MISC_REG_CHIP_METAL 0xa404
1476/* [R 16] These bits indicate the part number for the chip. */
1477#define MISC_REG_CHIP_NUM 0xa408
1478/* [R 4] These bits indicate the base revision of the chip. This value
1479 starts at 0x0 for the A0 tape-out and increments by one for each
1480 all-layer tape-out. */
1481#define MISC_REG_CHIP_REV 0xa40c
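/* Usage sketch (illustrative only): reading the chip identification
 * registers. The masks below are assumptions derived from the [R n] widths
 * above rather than driver-defined constants:
 *
 *	u32 num   = REG_RD(bp, MISC_REG_CHIP_NUM)   & 0xffff;
 *	u32 rev   = REG_RD(bp, MISC_REG_CHIP_REV)   & 0xf;
 *	u32 metal = REG_RD(bp, MISC_REG_CHIP_METAL) & 0xff;
 *	u32 bond  = REG_RD(bp, MISC_REG_BOND_ID)    & 0xf;
 */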
1482/* [RW 32] The following driver registers(1...16) represent 16 drivers and
1483 32 clients. Each client can be controlled by one driver only. A one in a
1484 bit position means this driver controls the corresponding client (e.g.
1485 bit 5 set means this driver controls client number 5). addr1 = set;
1486 addr0 = clear; reads from both addresses give the same result = status.
1487 A write to address 1 sets a request to control all the clients whose
1488 corresponding bit (in the write command) is set. If the client is free
1489 (the corresponding bit in all the other drivers is clear) a one will be
1490 written to that driver register; if the client isn't free the bit will
1491 remain zero. If the bit is already set (the driver requests control of a
1492 client it already controls) the ~MISC_REGISTERS_INT_STS.GENERIC_SW
1493 interrupt will be asserted. A write to address 0 sets a request to free
1494 all the clients whose corresponding bit (in the write command) is set.
1495 If the corresponding bit is clear (the driver requests to free a client
1496 it doesn't control) the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt
1497 will be asserted. */
1498#define MISC_REG_DRIVER_CONTROL_1 0xa510
1499#define MISC_REG_DRIVER_CONTROL_7 0xa3c8
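/* Usage sketch (illustrative only; the helper names are hypothetical, not
 * driver API). Following the description above, address 1 (assumed to be
 * byte offset +4) requests control of clients, address 0 releases them, and
 * reading either address returns the ownership status:
 *
 *	static bool example_try_take_client(struct bnx2x *bp, int client)
 *	{
 *		u32 mask = 1 << client;
 *
 *		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + 4, mask);
 *		return (REG_RD(bp, MISC_REG_DRIVER_CONTROL_1) & mask) != 0;
 *	}
 *
 *	static void example_free_client(struct bnx2x *bp, int client)
 *	{
 *		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1, 1 << client);
 *	}
 */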
1500/* [RW 1] e1hmf for WOL. If clr; the WOL signal to the PXP will be sent on
1501 bit 0 only. */
1502#define MISC_REG_E1HMF_MODE 0xa5f8
1503/* [R 1] Status of four port mode path swap input pin. */
1504#define MISC_REG_FOUR_PORT_PATH_SWAP 0xa75c
1505/* [RW 2] 4 port path swap overwrite. [0] - Overwrite control; if it is 0 -
1506 the path_swap output is equal to 4 port mode path swap input pin; if it
1507 is 1 - the path_swap output is equal to bit[1] of this register; [1] -
1508 Overwrite value. If bit[0] of this register is 1 this is the value that
1509 receives the path_swap output. Reset on Hard reset. */
1510#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR 0xa738
1511/* [R 1] Status of 4 port mode port swap input pin. */
1512#define MISC_REG_FOUR_PORT_PORT_SWAP 0xa754
1513/* [RW 2] 4 port port swap overwrite. [0] - Overwrite control; if it is 0 -
1514 the port_swap output is equal to 4 port mode port swap input pin; if it
1515 is 1 - the port_swap output is equal to bit[1] of this register; [1] -
1516 Overwrite value. If bit[0] of this register is 1 this is the value that
1517 receives the port_swap output. Reset on Hard reset. */
1518#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR 0xa734
1519/* [RW 32] Debug only: spare RW register reset by core reset */
1520#define MISC_REG_GENERIC_CR_0 0xa460
1521#define MISC_REG_GENERIC_CR_1 0xa464
1522/* [RW 32] Debug only: spare RW register reset by por reset */
1523#define MISC_REG_GENERIC_POR_1 0xa474
1524/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to
1525 use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO
1526 can not be configured as an output. Each output has its output enable in
1527 the MCP register space; but this bit needs to be set to make use of that.
1528 Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When
1529 set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON.
1530 When set to 0 - vTMON is in reset. Bit[6]: setting this bit will change
1531 the i/o to an output and will drive the TimeSync output. Bit[31:7]:
1532 spare. Global register. Reset by hard reset. */
1533#define MISC_REG_GEN_PURP_HWG 0xa9a0
1534/* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of
1535 these bits is written as a '1'; the corresponding GPIO bit will turn off
1536 its drivers and become an input. This is the reset state of all GPIO
1537 pins. The read value of these bits will be a '1' if the last command
1538 (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff).
1539 [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written
1540 as a '1'; the corresponding GPIO bit will drive low. The read value of
1541 these bits will be a '1' if the last command (#SET; #CLR; or #FLOAT) for
1542 this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET port
1543 0; When any of these bits is written as a '1'; the corresponding GPIO
1544 bit will drive high (if it has that capability). The read value of these
1545 bits will be a '1' if the last command (#SET; #CLR; or #FLOAT) for this
1546 bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE port 0;
1547 RO; These bits indicate the read value of each of the eight GPIO pins.
1548 This is the result value of the pin; not the drive value. Writing these
1549 bits will have no effect. */
1550#define MISC_REG_GPIO 0xa490
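/* Usage sketch (illustrative only; the driver's real GPIO helpers also take
 * the MCP lock before touching this register). From the field map above,
 * SET sits at bits [11-8]/[15-12] and CLR at [19-16]/[23-20] for port 0/1,
 * and the register is write-one-to-act, so no read-modify-write is needed:
 *
 *	static void example_gpio_drive(struct bnx2x *bp, int pin, int port,
 *				       bool high)
 *	{
 *		int shift = (high ? 8 : 16) + 4 * port + pin;
 *
 *		REG_WR(bp, MISC_REG_GPIO, 1 << shift);
 *	}
 *
 * The current pin state comes from the read-only VALUE field:
 *
 *	int val = (REG_RD(bp, MISC_REG_GPIO) >> (4 * port + pin)) & 1;
 */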
1551/* [RW 8] These bits enable the GPIO_INTs to signal events to the
1552 IGU/MCP according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2]
1553 p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2;
1554 [7] p1_gpio_3; */
1555#define MISC_REG_GPIO_EVENT_EN 0xa2bc
1556/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing a
1557 '1' to these bits clears the corresponding bit in the #OLD_VALUE
1558 register. This will acknowledge an interrupt on the falling edge of the
1559 corresponding GPIO input (reset value 0). [23-20] OLD_SET port1; [19-16]
1560 OLD_SET port0; Writing a '1' to these bits sets the corresponding bit in
1561 the #OLD_VALUE register. This will acknowledge an interrupt on the rising
1562 edge of the corresponding GPIO input (reset value 0). [15-12] OLD_VALUE
1563 port1; [11-8] OLD_VALUE port0; RO; These bits indicate the old value of
1564 the GPIO input. When the ~INT_STATE bit is set; this bit indicates the
1565 OLD value of the pin such that if ~INT_STATE is set and this bit is '0';
1566 then the interrupt is due to a low to high edge. If ~INT_STATE is set and
1567 this bit is '1'; then the interrupt is due to a high to low edge (reset
1568 value 0). [7-4] INT_STATE port1; [3-0] INT_STATE port0; RO; These bits
1569 indicate the current GPIO interrupt state for each GPIO pin. This bit is
1570 cleared when the appropriate #OLD_SET or #OLD_CLR command bit is written.
1571 This bit is set when the GPIO input does not match the current value in
1572 #OLD_VALUE (reset value 0). */
1573#define MISC_REG_GPIO_INT 0xa494
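/* Usage sketch (illustrative only). Acknowledging a GPIO interrupt means
 * writing OLD_SET (rising edge) or OLD_CLR (falling edge) so that
 * #OLD_VALUE matches the pin again; the shifts follow the field map above:
 *
 *	static void example_gpio_int_ack(struct bnx2x *bp, int pin, int port,
 *					 bool rising)
 *	{
 *		int shift = (rising ? 16 : 24) + 4 * port + pin;
 *
 *		REG_WR(bp, MISC_REG_GPIO_INT, 1 << shift);
 *	}
 */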
1574/* [R 28] this field holds the last information that caused reserved
1575 attention. bits [19:0] - address; [22:20] function; [23] reserved;
1576 [27:24] the master that caused the attention - according to the following
1577 encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
1578 dbu; 8 = dmae */
1579#define MISC_REG_GRC_RSV_ATTN 0xa3c0
1580/* [R 28] this field holds the last information that caused timeout
1581 attention. bits [19:0] - address; [22:20] function; [23] reserved;
1582 [27:24] the master that caused the attention - according to the following
1583 encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
1584 dbu; 8 = dmae */
1585#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
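/* Usage sketch (illustrative only): decoding the timeout information per the
 * bit layout above (address is bits [19:0], function [22:20], master
 * [27:24]; 1 = pxp, 2 = mcp, etc.). The reserved-access register decodes
 * the same way:
 *
 *	u32 info   = REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
 *	u32 addr   = info & 0xfffff;
 *	u32 func   = (info >> 20) & 0x7;
 *	u32 master = (info >> 24) & 0xf;
 */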
1586/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any
1587 access that does not finish within
1588 ~misc_registers_grc_timeout_val.grc_timeout_val cycles. When this bit is
1589 cleared; this timeout is disabled. If this timeout occurs; the GRC shall
1590 assert its attention output. */
1591#define MISC_REG_GRC_TIMEOUT_EN 0xa280
1592/* [RW 28] 28 LSB of LCPLL first register; reset val = 521. inside order of
1593 the bits is: [2:0] OAC (reset value 001) CML output buffer bias control;
1594 111 for +40%; 011 for +20%; 001 for 0%; 000 for -20%. [5:3] Icp_ctrl
1595 (reset value 001) Charge pump current control; 111 for 720u; 011 for
1596 600u; 001 for 480u and 000 for 360u. [7:6] Bias_ctrl (reset value 00)
1597 Global bias control; When bit 7 is high bias current will be 10 0gh; When
1598 bit 6 is high bias will be 100w; Valid values are 00; 10; 01. [10:8]
1599 Pll_observe (reset value 010) Bits to control observability. bit 10 is
1600 for test bias; bit 9 is for test CK; bit 8 is test Vc. [12:11] Vth_ctrl
1601 (reset value 00) Comparator threshold control. 00 for 0.6V; 01 for 0.54V
1602 and 10 for 0.66V. [13] pllSeqStart (reset value 0) Enables VCO tuning
1603 sequencer: 1= sequencer disabled; 0= sequencer enabled (inverted
1604 internally). [14] reserved (reset value 0) Reset for VCO sequencer is
1605 connected to RESET input directly. [15] capRetry_en (reset value 0)
1606 enable retry on cap search failure (inverted). [16] freqMonitor_en (reset
1607 value 0) bit to continuously monitor vco freq (inverted). [17]
1608 freqDetRestart_en (reset value 0) bit to enable restart when not freq
1609 locked (inverted). [18] freqDetRetry_en (reset value 0) bit to enable
1610 retry on freq det failure(inverted). [19] pllForceFdone_en (reset value
1611 0) bit to enable pllForceFdone & pllForceFpass into pllSeq. [20]
1612 pllForceFdone (reset value 0) bit to force freqDone. [21] pllForceFpass
1613 (reset value 0) bit to force freqPass. [22] pllForceDone_en (reset value
1614 0) bit to enable pllForceCapDone. [23] pllForceCapDone (reset value 0)
1615 bit to force capDone. [24] pllForceCapPass_en (reset value 0) bit to
1616 enable pllForceCapPass. [25] pllForceCapPass (reset value 0) bit to force
1617 capPass. [26] capRestart (reset value 0) bit to force cap sequencer to
1618 restart. [27] capSelectM_en (reset value 0) bit to enable cap select
1619 register bits. */
1620#define MISC_REG_LCPLL_CTRL_1 0xa2a4
1621#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8
1622/* [RW 4] Interrupt mask register #0 read/write */
1623#define MISC_REG_MISC_INT_MASK 0xa388
1624/* [RW 1] Parity mask register #0 read/write */
1625#define MISC_REG_MISC_PRTY_MASK 0xa398
1626/* [R 1] Parity register #0 read */
1627#define MISC_REG_MISC_PRTY_STS 0xa38c
1628/* [RC 1] Parity register #0 read clear */
1629#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
1630#define MISC_REG_NIG_WOL_P0 0xa270
1631#define MISC_REG_NIG_WOL_P1 0xa274
1632/* [R 1] If set indicate that the pcie_rst_b was asserted without perst
1633 assertion */
1634#define MISC_REG_PCIE_HOT_RESET 0xa618
1635/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x071d2911.
1636 inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1
1637 divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1
1638 divider[3] (reset value 0); [4] P2 divider[0] (reset value 1); [5] P2
1639 divider[1] (reset value 0); [6] P2 divider[2] (reset value 0); [7] P2
1640 divider[3] (reset value 0); [8] ph_det_dis (reset value 1); [9]
1641 freq_det_dis (reset value 0); [10] Icpx[0] (reset value 0); [11] Icpx[1]
1642 (reset value 1); [12] Icpx[2] (reset value 0); [13] Icpx[3] (reset value
1643 1); [14] Icpx[4] (reset value 0); [15] Icpx[5] (reset value 0); [16]
1644 Rx[0] (reset value 1); [17] Rx[1] (reset value 0); [18] vc_en (reset
1645 value 1); [19] vco_rng[0] (reset value 1); [20] vco_rng[1] (reset value
1646 1); [21] Kvco_xf[0] (reset value 0); [22] Kvco_xf[1] (reset value 0);
1647 [23] Kvco_xf[2] (reset value 0); [24] Kvco_xs[0] (reset value 1); [25]
1648 Kvco_xs[1] (reset value 1); [26] Kvco_xs[2] (reset value 1); [27]
1649 testd_en (reset value 0); [28] testd_sel[0] (reset value 0); [29]
1650 testd_sel[1] (reset value 0); [30] testd_sel[2] (reset value 0); [31]
1651 testa_en (reset value 0); */
1652#define MISC_REG_PLL_STORM_CTRL_1 0xa294
1653#define MISC_REG_PLL_STORM_CTRL_2 0xa298
1654#define MISC_REG_PLL_STORM_CTRL_3 0xa29c
1655#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0
1656/* [R 1] Status of 4 port mode enable input pin. */
1657#define MISC_REG_PORT4MODE_EN 0xa750
1658/* [RW 2] 4 port mode enable overwrite.[0] - Overwrite control; if it is 0 -
1659 * the port4mode_en output is equal to 4 port mode input pin; if it is 1 -
1660 * the port4mode_en output is equal to bit[1] of this register; [1] -
1661 * Overwrite value. If bit[0] of this register is 1 this is the value that
1662 * receives the port4mode_en output. */
1663#define MISC_REG_PORT4MODE_EN_OVWR 0xa720
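/* Usage sketch (illustrative only). The *_OVWR registers share one pattern:
 * bit [0] enables the overwrite and bit [1] supplies the forced value.
 * Forcing 4-port mode on regardless of the strap pin:
 *
 *	REG_WR(bp, MISC_REG_PORT4MODE_EN_OVWR, (1 << 1) | (1 << 0));
 *
 * Writing 0 returns control to the input pin.
 */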
1664/* [RW 32] reset reg#2; write/read one = the specific block is out of
1665 reset; write/read zero = the specific block is in reset; addr 0-wr - the
1666 write value will be written to the register; addr 1-set - one will be
1667 written to all the bits that have the value of one in the data written
1668 (bits that have the value of zero will not be changed); addr 2-clear -
1669 zero will be written to all the bits that have the value of one in the
1670 data written (bits that have the value of zero will not be changed);
1671 addr 3-ignore; reads ignore from all addrs except addr 00; inside order
1672 of the bits is: [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3]
1673 rst_emac1; [4] rst_grc; [5] rst_mcp_n_reset_reg_hard_core; [6]
1674 rst_mcp_n_hard_core_rst_b; [7] rst_mcp_n_reset_cmn_cpu; [8]
1675 rst_mcp_n_reset_cmn_core; [9] rst_rbcn; [10] rst_dbg; [11] rst_misc_core;
1676 [12] rst_dbue (UART); [13] Pci_resetmdio_n; [14] rst_emac0_hard_core;
1677 [15] rst_emac1_hard_core; [16] rst_pxp_rq_rd_wr; [31:17] reserved */
1678#define MISC_REG_RESET_REG_2 0xa590
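/* Usage sketch (illustrative only; assumes the addr 0/1/2 scheme above maps
 * to consecutive 32-bit addresses, i.e. set at +4 and clear at +8). Taking
 * EMAC0 (bit [2]) out of reset and then putting it back:
 *
 *	REG_WR(bp, MISC_REG_RESET_REG_2 + 4, 1 << 2);
 *	REG_WR(bp, MISC_REG_RESET_REG_2 + 8, 1 << 2);
 *
 * Only the bits set in the written data are affected; all other blocks keep
 * their current reset state.
 */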
1679/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
1680 shared with the driver resides */
1681#define MISC_REG_SHARED_MEM_ADDR 0xa2b4
1682/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1';
1683 the corresponding SPIO bit will turn off its drivers and become an
1684 input. This is the reset state of all SPIO pins. The read value of these
1685 bits will be a '1' if the last command (#SET; #CLR; or #FLOAT) for this
1686 bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits
1687 is written as a '1'; the corresponding SPIO bit will drive low. The read
1688 value of these bits will be a '1' if the last command (#SET; #CLR; or
1689 #FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of
1690 these bits is written as a '1'; the corresponding SPIO bit will drive
1691 high (if it has that capability). The read value of these bits will be a
1692 '1' if the last command (#SET; #CLR; or #FLOAT) for this bit was a #SET.
1693 (reset value 0). [7-0] VALUE RO; These bits indicate the read value of
1694 each of the eight SPIO pins. This is the result value of the pin; not the
1695 drive value. Writing these bits will have no effect. Each 8-bit field
1696 is divided as follows: [0] VAUX Enable; when pulsed low; enables supply
1697 from VAUX. (This is an output pin only; the FLOAT field is not applicable
1698 for this pin); [1] VAUX Disable; when pulsed low; disables supply from
1699 VAUX. (This is an output pin only; FLOAT field is not applicable for this
1700 pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to
1701 select VAUX supply. (This is an output pin only; it is not controlled by
1702 the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT
1703 field is not applicable for this pin; only the VALUE field is relevant -
1704 it reflects the output value); [3] port swap; [4] spio_4; [5] spio_5; [6]
1705 Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP
1706 device ID select; read by UMP firmware. */
1707#define MISC_REG_SPIO 0xa4fc
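/* Usage sketch (illustrative only). SPIO follows the same write-one-to-act
 * pattern as GPIO, with SET at [15-8], CLR at [23-16] and FLOAT at [31-24].
 * Driving SPIO 5 low and then floating it again:
 *
 *	REG_WR(bp, MISC_REG_SPIO, 1 << (16 + 5));
 *	REG_WR(bp, MISC_REG_SPIO, 1 << (24 + 5));
 *
 * The current pin state is read from the RO VALUE field:
 *
 *	int spio5 = (REG_RD(bp, MISC_REG_SPIO) >> 5) & 1;
 */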
1708/* [RW 8] These bits enable the SPIO_INTs to signal events to the IGU/MCP
1709 according to the following map: [3:0] reserved; [4] spio_4; [5] spio_5;
1710 [7:6] reserved */
1711#define MISC_REG_SPIO_EVENT_EN 0xa2b8
1712/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bits clears the
1713 corresponding bit in the #OLD_VALUE register. This will acknowledge an
1714 interrupt on the falling edge of corresponding SPIO input (reset value
1715 0). [23-16] OLD_SET Writing a '1' to these bits sets the corresponding bit
1716 in the #OLD_VALUE register. This will acknowledge an interrupt on the
1717 rising edge of corresponding SPIO input (reset value 0). [15-8] OLD_VALUE
1718 RO; These bits indicate the old value of the SPIO input value. When the
1719 ~INT_STATE bit is set; this bit indicates the OLD value of the pin such
1720 that if ~INT_STATE is set and this bit is '0'; then the interrupt is due
1721 to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the
1722 interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE
1723 RO; These bits indicate the current SPIO interrupt state for each SPIO
1724 pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR
1725 command bit is written. This bit is set when the SPIO input does not
1726 match the current value in #OLD_VALUE (reset value 0). */
1727#define MISC_REG_SPIO_INT 0xa500
1728/* [RW 32] reload value for counter 4; the value will be reloaded if the
1729 counter reaches zero and the reload bit
1730 (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1]) is set */
1731#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
1732/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
1733 in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
1734 timer 8 */
1735#define MISC_REG_SW_TIMER_VAL 0xa5c0
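/* Usage sketch (illustrative only; assumes the eight timers occupy
 * consecutive 32-bit addresses as described above). Reading timer 3
 * (address 2):
 *
 *	u32 timer3 = REG_RD(bp, MISC_REG_SW_TIMER_VAL + 4 * 2);
 */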
1736/* [R 1] Status of two port mode path swap input pin. */
1737#define MISC_REG_TWO_PORT_PATH_SWAP 0xa758
1738/* [RW 2] 2 port mode path swap overwrite. [0] - Overwrite control; if it is 0 - the
1739 path_swap output is equal to 2 port mode path swap input pin; if it is 1
1740 - the path_swap output is equal to bit[1] of this register; [1] -
1741 Overwrite value. If bit[0] of this register is 1 this is the value that
1742 receives the path_swap output. Reset on Hard reset. */
1743#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR 0xa72c
1744/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
1745 loaded; 0-prepare; 1-unprepare */
1746#define MISC_REG_UNPREPARED 0xa424
1752/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or
1753 * not it is the recipient of the message on the MDIO interface. The value
1754 * is compared to the value on ctrl_md_devad. Drives output
1755 * misc_xgxs0_phy_addr. Global register. */
1756#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc
1757/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
1758 side. This should be less than or equal to phy_port_mode if some of the
1759 ports are not used. This enables reduction of frequency on the core side.
1760 This is a strap input for the XMAC_MP core. 00 - Single Port Mode; 01 -
1761 Dual Port Mode; 10 - Tri Port Mode; 11 - Quad Port Mode. This is a strap
1762 input for the XMAC_MP core; and should be changed only while reset is
1763 held low. Reset on Hard reset. */
1764#define MISC_REG_XMAC_CORE_PORT_MODE 0xa964
1765/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp
1766 Core. This is a strap input for the XMAC_MP core. 00 - Single Port Mode;
1767 01 - Dual Port Mode; 1x - Quad Port Mode; This is a strap input for the
1768 XMAC_MP core; and should be changed only while reset is held low. Reset
1769 on Hard reset. */
1770#define MISC_REG_XMAC_PHY_PORT_MODE 0xa960
1771/* [RW 32] 1 [47] Packet Size = 64. Writes to this register write bits 31:0.
1772 * Reads from this register will clear bits 31:0. */
1773#define MSTAT_REG_RX_STAT_GR64_LO 0x200
1774/* [RW 32] 1 [00] Tx Good Packet Count. Writes to this register write bits
1775 * 31:0. Reads from this register will clear bits 31:0. */
1776#define MSTAT_REG_TX_STAT_GTXPOK_LO 0
1777#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0)
1778#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1)
1779#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
1780#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
1781#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
1782#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0)
1783#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0)
1784#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
1785#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
1786#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
1787#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS (0xf<<18)
1788/* [RW 1] Input enable for RX_BMAC0 IF */
1789#define NIG_REG_BMAC0_IN_EN 0x100ac
1790/* [RW 1] output enable for TX_BMAC0 IF */
1791#define NIG_REG_BMAC0_OUT_EN 0x100e0
1792/* [RW 1] output enable for TX BMAC pause port 0 IF */
1793#define NIG_REG_BMAC0_PAUSE_OUT_EN 0x10110
1794/* [RW 1] output enable for RX_BMAC0_REGS IF */
1795#define NIG_REG_BMAC0_REGS_OUT_EN 0x100e8
1796/* [RW 1] output enable for RX BRB1 port0 IF */
1797#define NIG_REG_BRB0_OUT_EN 0x100f8
1798/* [RW 1] Input enable for TX BRB1 pause port 0 IF */
1799#define NIG_REG_BRB0_PAUSE_IN_EN 0x100c4
1800/* [RW 1] output enable for RX BRB1 port1 IF */
1801#define NIG_REG_BRB1_OUT_EN 0x100fc
1802/* [RW 1] Input enable for TX BRB1 pause port 1 IF */
1803#define NIG_REG_BRB1_PAUSE_IN_EN 0x100c8
1804/* [RW 1] output enable for RX BRB1 LP IF */
1805#define NIG_REG_BRB_LB_OUT_EN 0x10100
1806/* [WB_W 82] Debug packet to LP from RBC; Data spelling: [63:0] data; [64]
1807 error; [67:65] eop_bvalid; [68] eop; [69] sop; [70] port_id; [71] flush;
1808 [73:72] vnic_num; [81:74] sideband_info */
1809#define NIG_REG_DEBUG_PACKET_LB 0x10800
1810/* [RW 1] Input enable for TX Debug packet */
1811#define NIG_REG_EGRESS_DEBUG_IN_EN 0x100dc
1812/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all
1813 packets from the PBF are not forwarded to the MAC; they are just deleted
1814 from the FIFO. The first packet may be deleted from the middle; the last
1815 packet is always deleted to the end. */
1816#define NIG_REG_EGRESS_DRAIN0_MODE 0x10060
1817/* [RW 1] Output enable to EMAC0 */
1818#define NIG_REG_EGRESS_EMAC0_OUT_EN 0x10120
1819/* [RW 1] MAC configuration for packets of port0. If 1 - all packets output
1820 to the emac for port0; otherwise to the bmac for port0 */
1821#define NIG_REG_EGRESS_EMAC0_PORT 0x10058
1822/* [RW 1] Input enable for TX PBF user packet port0 IF */
1823#define NIG_REG_EGRESS_PBF0_IN_EN 0x100cc
1824/* [RW 1] Input enable for TX PBF user packet port1 IF */
1825#define NIG_REG_EGRESS_PBF1_IN_EN 0x100d0
1826/* [RW 1] Input enable for TX UMP management packet port0 IF */
1827#define NIG_REG_EGRESS_UMP0_IN_EN 0x100d4
1828/* [RW 1] Input enable for RX_EMAC0 IF */
1829#define NIG_REG_EMAC0_IN_EN 0x100a4
1830/* [RW 1] output enable for TX EMAC pause port 0 IF */
1831#define NIG_REG_EMAC0_PAUSE_OUT_EN 0x10118
1832/* [R 1] status from emac0. This bit is set when MDINT from either the
1833 EXT_MDINT pin or from the Copper PHY is driven low. This condition must
1834 be cleared in the attached PHY device that is driving the MDINT pin. */
1835#define NIG_REG_EMAC0_STATUS_MISC_MI_INT 0x10494
1836/* [WB 48] This address space contains BMAC0 registers. The BMAC registers
1837 are described in appendix A. In order to access the BMAC0 registers; the
1838 base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be
1839 added to each BMAC register offset */
1840#define NIG_REG_INGRESS_BMAC0_MEM 0x10c00
1841/* [WB 48] This address space contains BMAC1 registers. The BMAC registers
1842 are described in appendix A. In order to access the BMAC1 registers; the
1843 base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be
1844 added to each BMAC register offset */
1845#define NIG_REG_INGRESS_BMAC1_MEM 0x11000
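/* Usage sketch (illustrative only). BMAC registers are reached by adding a
 * per-register offset to the port's memory window; EXAMPLE_BMAC_REG below
 * is a hypothetical offset, and the wide (64-bit+) BMAC registers need the
 * driver's DMAE/indirect helpers rather than a single 32-bit read:
 *
 *	u32 base = port ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM;
 *	u32 lo   = REG_RD(bp, base + EXAMPLE_BMAC_REG);
 */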
1846/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */
1847#define NIG_REG_INGRESS_EOP_LB_EMPTY 0x104e0
1848/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data
1849 packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] */
1850#define NIG_REG_INGRESS_EOP_LB_FIFO 0x104e4
1851/* [RW 27] 0 - must be active for Everest A0; 1 - for Everest B0 when the
1852 latch logic for interrupts must be used. Enables each interrupt bit of
1853 ~latch_status.latch_status */
1854#define NIG_REG_LATCH_BC_0 0x16210
1855/* [RW 27] Latch for each interrupt from Unicore.b[0]
1856 status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete;
1857 b[2]status_emac0_misc_cfg_change; b[3]status_emac0_misc_link_status;
1858 b[4]status_emac0_misc_link_change; b[5]status_emac0_misc_attn;
1859 b[6]status_serdes0_mac_crs; b[7]status_serdes0_autoneg_complete;
1860 b[8]status_serdes0_fiber_rxact; b[9]status_serdes0_link_status;
1861 b[10]status_serdes0_mr_page_rx; b[11]status_serdes0_cl73_an_complete;
1862 b[12]status_serdes0_cl73_mr_page_rx; b[13]status_serdes0_rx_sigdet;
1863 b[14]status_xgxs0_remotemdioreq; b[15]status_xgxs0_link10g;
1864 b[16]status_xgxs0_autoneg_complete; b[17]status_xgxs0_fiber_rxact;
1865 b[21:18]status_xgxs0_link_status; b[22]status_xgxs0_mr_page_rx;
1866 b[23]status_xgxs0_cl73_an_complete; b[24]status_xgxs0_cl73_mr_page_rx;
1867 b[25]status_xgxs0_rx_sigdet; b[26]status_xgxs0_mac_crs */
1868#define NIG_REG_LATCH_STATUS_0 0x18000
1869/* [RW 1] led 10g for port 0 */
1870#define NIG_REG_LED_10G_P0 0x10320
1871/* [RW 1] led 10g for port 1 */
1872#define NIG_REG_LED_10G_P1 0x10324
1873/* [RW 1] Port0: This bit is set to enable the use of the
1874 ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field
1875 defined below. If this bit is cleared; then the blink rate will be about
1876 8Hz. */
1877#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 0x10318
1878/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for
1879 Traffic LED in milliseconds. Must be a non-zero value. This 12-bit field
1880 is reset to 0x080; giving a default blink rate of approximately 8Hz. */
1881#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310
1882/* [RW 1] Port0: If set along with the
1883 ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0
1884 bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED
1885 bit; the Traffic LED will blink with the blink rate specified in
1886 ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
1887 ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
1888 fields. */
1889#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 0x10308
1890/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The
1891 Traffic LED will then be controlled via bit ~nig_registers_
1892 led_control_traffic_p0.led_control_traffic_p0 and bit
1893 ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 */
1894#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 0x102f8
1895/* [RW 1] Port0: If set along with the led_control_override_traffic_p0 bit;
1896 turns on the Traffic LED. If the led_control_blink_traffic_p0 bit is also
1897 set; the LED will blink with the blink rate specified in
1898 ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
1899 ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
1900 fields. */
1901#define NIG_REG_LED_CONTROL_TRAFFIC_P0 0x10300
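/* Usage sketch (illustrative only; this mirrors the register roles described
 * above, not a specific driver routine): overriding hardware LED control and
 * blinking the port 0 Traffic LED at roughly 5Hz (200ms on + off cycle):
 *
 *	REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 1);
 *	REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0, 1);
 *	REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0, 1);
 *	REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0, 1);
 *	REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0, 200);
 */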
1902/* [RW 4] led mode for port0: 0 MAC; 1-3 PHY1; 4 MAC2; 5-7 PHY4; 8-MAC3;
1903 9-11PHY7; 12 MAC4; 13-15 PHY10; */
1904#define NIG_REG_LED_MODE_P0 0x102f0
1905/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1-
1906 tsdm enable; b2- usdm enable */
1907#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070
1908#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 0x16074
1909/* [RW 1] SAFC enable for port0. This register may be set to 1 only when
1910 ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable = 0 for the same
1911 port */
1912#define NIG_REG_LLFC_ENABLE_0 0x16208
1913#define NIG_REG_LLFC_ENABLE_1 0x1620c
1914/* [RW 16] classes are high-priority for port0 */
1915#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058
1916#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c
1917/* [RW 16] classes are low-priority for port0 */
1918#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060
1919#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064
1920/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
1921#define NIG_REG_LLFC_OUT_EN_0 0x160c8
1922#define NIG_REG_LLFC_OUT_EN_1 0x160cc
1923#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
1924#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
1925#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
1926#define NIG_REG_LLH0_BRB1_DRV_MASK_MF 0x16048
1927/* [RW 1] send to BRB1 if no match on any of RMP rules. */
1928#define NIG_REG_LLH0_BRB1_NOT_MCP 0x1025c
1929/* [RW 2] Determine the classification participants. 0: no classification; 1:
1930 classification upon VLAN id. 2: classification upon MAC address. 3:
1931 classification upon both VLAN id & MAC addr. */
1932#define NIG_REG_LLH0_CLS_TYPE 0x16080
1933/* [RW 32] cm header for llh0 */
1934#define NIG_REG_LLH0_CM_HEADER 0x1007c
1935#define NIG_REG_LLH0_DEST_IP_0_1 0x101dc
1936#define NIG_REG_LLH0_DEST_MAC_0_0 0x101c0
1937/* [RW 16] destination TCP port 1. The LLH will look for this port in
1938 all incoming packets. */
1939#define NIG_REG_LLH0_DEST_TCP_0 0x10220
1940/* [RW 16] destination UDP port 1. The LLH will look for this port in
1941 all incoming packets. */
1942#define NIG_REG_LLH0_DEST_UDP_0 0x10214
1943#define NIG_REG_LLH0_ERROR_MASK 0x1008c
1944/* [RW 8] event id for llh0 */
1945#define NIG_REG_LLH0_EVENT_ID 0x10084
1946#define NIG_REG_LLH0_FUNC_EN 0x160fc
1947#define NIG_REG_LLH0_FUNC_MEM 0x16180
1948#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140
1949#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
1950/* [RW 1] Determine the IP version to look for in
1951 ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
1952#define NIG_REG_LLH0_IPV4_IPV6_0 0x10208
1953/* [RW 1] t bit for llh0 */
1954#define NIG_REG_LLH0_T_BIT 0x10074
1955/* [RW 12] VLAN ID 1. In case of VLAN packet the LLH will look for this ID. */
1956#define NIG_REG_LLH0_VLAN_ID_0 0x1022c
1957/* [RW 8] init credit counter for port0 in LLH */
1958#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554
1959#define NIG_REG_LLH0_XCM_MASK 0x10130
1960#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248
1961/* [RW 1] send to BRB1 if no match on any of RMP rules. */
1962#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc
1963/* [RW 2] Determine the classification participants. 0: no classification; 1:
1964 classification upon VLAN id. 2: classification upon MAC address. 3:
1965 classification upon both VLAN id & MAC addr. */
1966#define NIG_REG_LLH1_CLS_TYPE 0x16084
1967/* [RW 32] cm header for llh1 */
1968#define NIG_REG_LLH1_CM_HEADER 0x10080
1969#define NIG_REG_LLH1_ERROR_MASK 0x10090
1970/* [RW 8] event id for llh1 */
1971#define NIG_REG_LLH1_EVENT_ID 0x10088
1972#define NIG_REG_LLH1_FUNC_MEM 0x161c0
1973#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
1974#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
1975/* [RW 1] When this bit is set; the LLH will classify the packet before
1976 * sending it to the BRB or calculating WoL on it. This bit controls port 1
1977 * only. The legacy llh_multi_function_mode bit controls port 0. */
1978#define NIG_REG_LLH1_MF_MODE 0x18614
1979/* [RW 8] init credit counter for port1 in LLH */
1980#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
1981#define NIG_REG_LLH1_XCM_MASK 0x10134
1982/* [RW 1] When this bit is set; the LLH will expect all packets to be with
1983 e1hov */
1984#define NIG_REG_LLH_E1HOV_MODE 0x160d8
1985/* [RW 1] When this bit is set; the LLH will classify the packet before
1986 sending it to the BRB or calculating WoL on it. */
1987#define NIG_REG_LLH_MF_MODE 0x16024
1988#define NIG_REG_MASK_INTERRUPT_PORT0 0x10330
1989#define NIG_REG_MASK_INTERRUPT_PORT1 0x10334
1990/* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */
1991#define NIG_REG_NIG_EMAC0_EN 0x1003c
1992/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1 block. */
1993#define NIG_REG_NIG_EMAC1_EN 0x10040
1994/* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the
1995 EMAC0 to strip the CRC from the ingress packets. */
1996#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044
1997/* [R 32] Interrupt register #0 read */
1998#define NIG_REG_NIG_INT_STS_0 0x103b0
1999#define NIG_REG_NIG_INT_STS_1 0x103c0
2000/* [R 32] Legacy E1 and E1H location for parity error mask register. */
2001#define NIG_REG_NIG_PRTY_MASK 0x103dc
2002/* [RW 32] Parity mask register #0 read/write */
2003#define NIG_REG_NIG_PRTY_MASK_0 0x183c8
2004#define NIG_REG_NIG_PRTY_MASK_1 0x183d8
2005/* [R 32] Legacy E1 and E1H location for parity error status register. */
2006#define NIG_REG_NIG_PRTY_STS 0x103d0
2007/* [R 32] Parity register #0 read */
2008#define NIG_REG_NIG_PRTY_STS_0 0x183bc
2009#define NIG_REG_NIG_PRTY_STS_1 0x183cc
2010/* [R 32] Legacy E1 and E1H location for parity error status clear register. */
2011#define NIG_REG_NIG_PRTY_STS_CLR 0x103d4
2012/* [RC 32] Parity register #0 read clear */
2013#define NIG_REG_NIG_PRTY_STS_CLR_0 0x183c0
2014#define NIG_REG_NIG_PRTY_STS_CLR_1 0x183d0
2015#define MCPR_IMC_COMMAND_ENABLE (1L<<31)
2016#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16
2017#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28
2018#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8
2019/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
2020 * Ethernet header. */
2021#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038
2022/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in
2023 * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be
2024 * disabled when this bit is set. */
2025#define NIG_REG_P0_HWPFC_ENABLE 0x18078
2026#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480
2027#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440
2028/* [RW 1] Input enable for RX MAC interface. */
2029#define NIG_REG_P0_MAC_IN_EN 0x185ac
2030/* [RW 1] Output enable for TX MAC interface */
2031#define NIG_REG_P0_MAC_OUT_EN 0x185b0
2032/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
2033#define NIG_REG_P0_MAC_PAUSE_OUT_EN 0x185b4
2034/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
2035 * future expansion) each priority is to be mapped to. Bits 3:0 specify the
2036 * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
2037 * priority field is extracted from the outer-most VLAN in receive packet.
2038 * Only COS 0 and COS 1 are supported in E2. */
2039#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054
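/* Usage sketch (illustrative only): building the eight 4-bit fields so that
 * VLAN priorities 0-3 map to COS 0 and priorities 4-7 map to COS 1:
 *
 *	u32 map = 0;
 *	int prio;
 *
 *	for (prio = 0; prio < 8; prio++)
 *		map |= (prio < 4 ? 0 : 1) << (4 * prio);
 *	REG_WR(bp, NIG_REG_P0_PKT_PRIORITY_TO_COS, map);
 */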
2040/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
2041 * priority is mapped to COS 0 when the corresponding mask bit is 1. More
2042 * than one bit may be set; allowing multiple priorities to be mapped to one
2043 * COS. */
2044#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058
2045/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
2046 * priority is mapped to COS 1 when the corresponding mask bit is 1. More
2047 * than one bit may be set; allowing multiple priorities to be mapped to one
2048 * COS. */
2049#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c
2050/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
2051 * priority is mapped to COS 2 when the corresponding mask bit is 1. More
2052 * than one bit may be set; allowing multiple priorities to be mapped to one
2053 * COS. */
2054#define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0
2055/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A
2056 * priority is mapped to COS 3 when the corresponding mask bit is 1. More
2057 * than one bit may be set; allowing multiple priorities to be mapped to one
2058 * COS. */
2059#define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4
2060/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A
2061 * priority is mapped to COS 4 when the corresponding mask bit is 1. More
2062 * than one bit may be set; allowing multiple priorities to be mapped to one
2063 * COS. */
2064#define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8
2065/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A
2066 * priority is mapped to COS 5 when the corresponding mask bit is 1. More
2067 * than one bit may be set; allowing multiple priorities to be mapped to one
2068 * COS. */
2069#define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc
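/* Usage sketch (illustrative only): steering PFC priority 3 to COS 1 and the
 * remaining priorities to COS 0. Each priority should normally be set in
 * exactly one COS mask:
 *
 *	REG_WR(bp, NIG_REG_P0_RX_COS1_PRIORITY_MASK, 1 << 3);
 *	REG_WR(bp, NIG_REG_P0_RX_COS0_PRIORITY_MASK, 0xff & ~(1 << 3));
 */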
2071/* [RW 15] Specify which of the credit registers the client is to be mapped
2072 * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
2073 * clients that are not subject to WFQ credit blocking - their
2074 * specifications here are not used. */
2075#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0
2076/* [RW 32] Specify which of the credit registers the client is to be mapped
2077 * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
2078 * for client 0; bits [35:32] are for client 8. For clients that are not
2079 * subject to WFQ credit blocking - their specifications here are not used.
2080 * This is a new register (with 2_) added in E3 B0 to accommodate the 9
2081 * input clients to ETS arbiter. The reset default is set for management and
2082 * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
2083 * use credit registers 0-5 respectively (0x543210876). Note that credit
2084 * registers can not be shared between clients. */
2085#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x18688
2086/* [RW 4] Specify which of the credit registers the client is to be mapped
2087 * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
2088 * for client 0; bits [35:32] are for client 8. For clients that are not
2089 * subject to WFQ credit blocking - their specifications here are not used.
2090 * This is a new register (with 2_) added in E3 B0 to accommodate the 9
2091 * input clients to ETS arbiter. The reset default is set for management and
2092 * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
2093 * use credit registers 0-5 respectively (0x543210876). Note that credit
2094 * registers can not be shared between clients. */
2095#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x1868c
2096/* [RW 5] Specify whether the client competes directly in the strict
2097 * priority arbiter. The bits are mapped according to client ID (client IDs
2098 * are defined in tx_arb_priority_client). Default value is set to enable
2099 * strict priorities for clients 0-2 -- management and debug traffic. */
2100#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8
2101/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The
2102 * bits are mapped according to client ID (client IDs are defined in
2103 * tx_arb_priority_client). Default value is 0 for not using WFQ credit
2104 * blocking. */
2105#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec
2106/* [RW 32] Specify the upper bound that credit register 0 is allowed to
2107 * reach. */
2108#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c
2109#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110
2110#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 0x18114
2111#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 0x18118
2112#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 0x1811c
2113#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 0x186a0
2114#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 0x186a4
2115#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 0x186a8
2116#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 0x186ac
2117/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
2118 * when it is time to increment. */
2119#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8
2120#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc
2121#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 0x18100
2122#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 0x18104
2123#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 0x18108
2124#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 0x18690
2125#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 0x18694
2126#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 0x18698
2127#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 0x1869c
2128/* [RW 12] Specify the number of strict priority arbitration slots between
2129 * two round-robin arbitration slots to avoid starvation. A value of 0 means
2130 * no strict priority cycles - the strict priority with anti-starvation
2131 * arbiter becomes a round-robin arbiter. */
2132#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4
2133/* [RW 15] Specify the client number to be assigned to each priority of the
2134 * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0]
2135 * are for priority 0 client; bits [14:12] are for priority 4 client. The
2136 * clients are assigned the following IDs: 0-management; 1-debug traffic
2137 * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
2138 * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000)
2139 * for management at priority 0; debug traffic at priorities 1 and 2; COS0
2140 * traffic at priority 3; and COS1 traffic at priority 4. */
2141#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4
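/* Illustrative sketch: extracting the client ID assigned to one strict
 * priority from the register above. Each priority is a 3-bit field, so for
 * the documented reset value 0x4688 this yields client 0 (management) for
 * priority 0 and client 4 (COS1) for priority 4. Assumes REG_RD() and
 * struct bnx2x from bnx2x.h; prio is expected in the range 0-4. */
static inline u8 bnx2x_p0_prio_client(struct bnx2x *bp, int prio)
{
	u32 map = REG_RD(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT);

	return (map >> (3 * prio)) & 0x7;
}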
2142/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
2143 * Ethernet header. */
2144#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c
2145#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
2146#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460
2147/* [RW 32] Specify the client number to be assigned to each priority of the
2148 * strict priority arbiter. This register specifies bits 31:0 of the 36-bit
2149 * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
2150 * client; bits [35:32] are for priority 8 client. The clients are assigned
2151 * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
2152 * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
2153 * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
2154 * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
2155 * accommodate the 9 input clients to ETS arbiter. */
2156#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB 0x18680
2157/* [RW 4] Specify the client number to be assigned to each priority of the
2158 * strict priority arbiter. This register specifies bits 35:32 of the 36-bit
2159 * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
2160 * client; bits [35:32] are for priority 8 client. The clients are assigned
2161 * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
2162 * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
2163 * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
2164 * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
2165 * accommodate the 9 input clients to ETS arbiter. */
2166#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
2167#define NIG_REG_P1_MAC_IN_EN 0x185c0
2168/* [RW 1] Output enable for TX MAC interface */
2169#define NIG_REG_P1_MAC_OUT_EN 0x185c4
2170/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
2171#define NIG_REG_P1_MAC_PAUSE_OUT_EN 0x185c8
2172/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
2173 * future expansion) each priority is to be mapped to. Bits 3:0 specify the
2174 * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
2175 * priority field is extracted from the outer-most VLAN in receive packet.
2176 * Only COS 0 and COS 1 are supported in E2. */
2177#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8
2178/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
2179 * priority is mapped to COS 0 when the corresponding mask bit is 1. More
2180 * than one bit may be set; allowing multiple priorities to be mapped to one
2181 * COS. */
2182#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac
2183/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
2184 * priority is mapped to COS 1 when the corresponding mask bit is 1. More
2185 * than one bit may be set; allowing multiple priorities to be mapped to one
2186 * COS. */
2187#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0
2188/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
2189 * priority is mapped to COS 2 when the corresponding mask bit is 1. More
2190 * than one bit may be set; allowing multiple priorities to be mapped to one
2191 * COS. */
2192#define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8
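/* Illustrative sketch with a hypothetical mapping: direct PFC/SAFC
 * priorities 0-3 to COS 0 and priorities 4-7 to COS 1 on port 1, leaving
 * COS 2 unused. A priority maps to a COS when its mask bit is 1, and more
 * than one bit may be set per register. Assumes REG_WR() and struct bnx2x
 * from bnx2x.h. */
static inline void bnx2x_p1_rx_prio_to_cos_sketch(struct bnx2x *bp)
{
	REG_WR(bp, NIG_REG_P1_RX_COS0_PRIORITY_MASK, 0x0f); /* prios 0-3 */
	REG_WR(bp, NIG_REG_P1_RX_COS1_PRIORITY_MASK, 0xf0); /* prios 4-7 */
	REG_WR(bp, NIG_REG_P1_RX_COS2_PRIORITY_MASK, 0x00); /* unused */
}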
2193/* [R 1] RX FIFO for receiving data from MAC is empty. */
2194#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c
2195/* [R 1] TLLH FIFO is empty. */
2196#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338
2197/* [RW 32] Specify which of the credit registers the client is to be mapped
2198 * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
2199 * for client 0; bits [35:32] are for client 8. For clients that are not
2200 * subject to WFQ credit blocking - their specifications here are not used.
2201 * This is a new register (with 2_) added in E3 B0 to accommodate the 9
2202 * input clients to ETS arbiter. The reset default is set for management and
2203 * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
2204 * use credit registers 0-5 respectively (0x543210876). Note that credit
2205 * registers can not be shared between clients. Note also that there are
2206 * only COS0-2 in port 1 - there is a total of 6 clients in port 1. Only
2207 * credit registers 0-5 are valid. This register should be configured
2208 * appropriately before enabling WFQ. */
2209#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x186e8
2210/* [RW 4] Specify which of the credit registers the client is to be mapped
2211 * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
2212 * for client 0; bits [35:32] are for client 8. For clients that are not
2213 * subject to WFQ credit blocking - their specifications here are not used.
2214 * This is a new register (with 2_) added in E3 B0 to accommodate the 9
2215 * input clients to ETS arbiter. The reset default is set for management and
2216 * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
2217 * use credit registers 0-5 respectively (0x543210876). Note that credit
2218 * registers can not be shared between clients. Note also that there are
2219 * only COS0-2 in port 1 - there is a total of 6 clients in port 1. Only
2220 * credit registers 0-5 are valid. This register should be configured
2221 * appropriately before enabling WFQ. */
2222#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x186ec
2223/* [RW 9] Specify whether the client competes directly in the strict
2224 * priority arbiter. The bits are mapped according to client ID (client IDs
2225 * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic
2226 * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
2227 * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic.
2228 * Default value is set to enable strict priorities for all clients. */
2229#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT 0x18234
2230/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The
2231 * bits are mapped according to client ID (client IDs are defined in
2232 * tx_arb_priority_client2): 0-management; 1-debug traffic from this port;
2233 * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2
2234 * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is
2235 * 0 for not using WFQ credit blocking. */
2236#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x18238
2237#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 0x18258
2238#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 0x1825c
2239#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 0x18260
2240#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 0x18264
2241#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 0x18268
2242#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 0x186f4
2243/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
2244 * when it is time to increment. */
2245#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 0x18244
2246#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 0x18248
2247#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 0x1824c
2248#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 0x18250
2249#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 0x18254
2250#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 0x186f0
2251/* [RW 12] Specify the number of strict priority arbitration slots between
2252 two round-robin arbitration slots to avoid starvation. A value of 0 means
2253 no strict priority cycles - the strict priority with anti-starvation
2254 arbiter becomes a round-robin arbiter. */
2255#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS 0x18240
2256/* [RW 32] Specify the client number to be assigned to each priority of the
2257 strict priority arbiter. This register specifies bits 31:0 of the 36-bit
2258 value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
2259 client; bits [35:32] are for priority 8 client. The clients are assigned
2260 the following IDs: 0-management; 1-debug traffic from this port; 2-debug
2261 traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
2262 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
2263 set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
2264 accommodate the 9 input clients to ETS arbiter. Note that this register
2265 is the same as the one for port 0, except that port 1 only has COS 0-2
2266 traffic. There is no traffic for COS 3-5 of port 1. */
2267#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB 0x186e0
2268/* [RW 4] Specify the client number to be assigned to each priority of the
2269 strict priority arbiter. This register specifies bits 35:32 of the 36-bit
2270 value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
2271 client; bits [35:32] are for priority 8 client. The clients are assigned
2272 the following IDs: 0-management; 1-debug traffic from this port; 2-debug
2273 traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
2274 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
2275 set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
2276 accommodate the 9 input clients to ETS arbiter. Note that this register
2277 is the same as the one for port 0, except that port 1 only has COS 0-2
2278 traffic. There is no traffic for COS 3-5 of port 1. */
2279#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4
2280/* [R 1] TX FIFO for transmitting data to MAC is empty. */
2281#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594
2282/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
2283 forwarded to the host. */
2284#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8
2287/* [RW 1] Pause enable for port0. This register may be set to 1 only when
2288 ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable = 0 for the same
2289 port. */
2290#define NIG_REG_PAUSE_ENABLE_0 0x160c0
2291#define NIG_REG_PAUSE_ENABLE_1 0x160c4
2292/* [RW 1] Input enable for RX PBF LP IF */
2293#define NIG_REG_PBF_LB_IN_EN 0x100b4
2294/* [RW 1] Value of this register will be transmitted to port swap when
2295 ~nig_registers_strap_override.strap_override = 1 */
2296#define NIG_REG_PORT_SWAP 0x10394
2297/* [RW 1] PPP enable for port0. This register may be set to 1 only when
2298 * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable = 0 for the
2299 * same port. */
2300#define NIG_REG_PPP_ENABLE_0 0x160b0
2301#define NIG_REG_PPP_ENABLE_1 0x160b4
2302/* [RW 1] output enable for RX parser descriptor IF */
2303#define NIG_REG_PRS_EOP_OUT_EN 0x10104
2304/* [RW 1] Input enable for RX parser request IF */
2305#define NIG_REG_PRS_REQ_IN_EN 0x100b8
2306/* [RW 5] control to serdes - CL45 DEVAD */
2307#define NIG_REG_SERDES0_CTRL_MD_DEVAD 0x10370
2308/* [RW 1] control to serdes; 0 - clause 45; 1 - clause 22 */
2309#define NIG_REG_SERDES0_CTRL_MD_ST 0x1036c
2310/* [RW 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */
2311#define NIG_REG_SERDES0_CTRL_PHY_ADDR 0x10374
2312/* [R 1] status from serdes0 that inputs to interrupt logic of link status */
2313#define NIG_REG_SERDES0_STATUS_LINK_STATUS 0x10578
2314/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
2315 for port0 */
2316#define NIG_REG_STAT0_BRB_DISCARD 0x105f0
2317/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
2318 for port0 */
2319#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8
2320/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
2321 between 1024 and 1522 bytes for port0 */
2322#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750
2323/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
2324 1523 bytes and above for port0 */
2325#define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760
2326/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
2327 for port1 */
2328#define NIG_REG_STAT1_BRB_DISCARD 0x10628
2329/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
2330 between 1024 and 1522 bytes for port1 */
2331#define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0
2332/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
2333 1523 bytes and above for port1 */
2334#define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0
2335/* [WB_R 64] Rx statistics : User octets received for LP */
2336#define NIG_REG_STAT2_BRB_OCTET 0x107e0
2337#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328
2338#define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c
2339/* [RW 1] port swap mux selection. If this register is equal to 0 then port
2340 swap is equal to the SPIO pin that inputs from ifmux_serdes_swap. If 1 then
2341 port swap is equal to ~nig_registers_port_swap.port_swap */
2342#define NIG_REG_STRAP_OVERRIDE 0x10398
2343/* [RW 1] output enable for RX_XCM0 IF */
2344#define NIG_REG_XCM0_OUT_EN 0x100f0
2345/* [RW 1] output enable for RX_XCM1 IF */
2346#define NIG_REG_XCM1_OUT_EN 0x100f4
2347/* [RW 1] control to xgxs - remote PHY in-band MDIO */
2348#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST 0x10348
2349/* [RW 5] control to xgxs - CL45 DEVAD */
2350#define NIG_REG_XGXS0_CTRL_MD_DEVAD 0x1033c
2351/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */
2352#define NIG_REG_XGXS0_CTRL_MD_ST 0x10338
2353/* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */
2354#define NIG_REG_XGXS0_CTRL_PHY_ADDR 0x10340
2355/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. */
2356#define NIG_REG_XGXS0_STATUS_LINK10G 0x10680
2357/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */
2358#define NIG_REG_XGXS0_STATUS_LINK_STATUS 0x10684
2359/* [RW 2] selection for XGXS lane of port 0 in NIG_MUX block */
2360#define NIG_REG_XGXS_LANE_SEL_P0 0x102e8
2361/* [RW 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */
2362#define NIG_REG_XGXS_SERDES0_MODE_SEL 0x102e0
2363#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT (0x1<<0)
2364#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1<<9)
2365#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15)
2366#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18)
2367#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
2368/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
2369#define PBF_REG_COS0_UPPER_BOUND 0x15c05c
2370/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
2371 * of port 0. */
2372#define PBF_REG_COS0_UPPER_BOUND_P0 0x15c2cc
2373/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
2374 * of port 1. */
2375#define PBF_REG_COS0_UPPER_BOUND_P1 0x15c2e4
2376/* [RW 31] The weight of COS0 in the ETS command arbiter. */
2377#define PBF_REG_COS0_WEIGHT 0x15c054
2378/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */
2379#define PBF_REG_COS0_WEIGHT_P0 0x15c2a8
2380/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */
2381#define PBF_REG_COS0_WEIGHT_P1 0x15c2c0
2382/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
2383#define PBF_REG_COS1_UPPER_BOUND 0x15c060
2384/* [RW 31] The weight of COS1 in the ETS command arbiter. */
2385#define PBF_REG_COS1_WEIGHT 0x15c058
2386/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */
2387#define PBF_REG_COS1_WEIGHT_P0 0x15c2ac
2388/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */
2389#define PBF_REG_COS1_WEIGHT_P1 0x15c2c4
2390/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */
2391#define PBF_REG_COS2_WEIGHT_P0 0x15c2b0
2392/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */
2393#define PBF_REG_COS2_WEIGHT_P1 0x15c2c8
2394/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */
2395#define PBF_REG_COS3_WEIGHT_P0 0x15c2b4
2396/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */
2397#define PBF_REG_COS4_WEIGHT_P0 0x15c2b8
2398/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */
2399#define PBF_REG_COS5_WEIGHT_P0 0x15c2bc
2400/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte
2401 * lines. */
2402#define PBF_REG_CREDIT_LB_Q 0x140338
2403/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte
2404 * lines. */
2405#define PBF_REG_CREDIT_Q0 0x14033c
2406/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte
2407 * lines. */
2408#define PBF_REG_CREDIT_Q1 0x140340
2409/* [RW 1] Disable processing further tasks from port 0 (after ending the
2410 current task in process). */
2411#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
2412/* [RW 1] Disable processing further tasks from port 1 (after ending the
2413 current task in process). */
2414#define PBF_REG_DISABLE_NEW_TASK_PROC_P1 0x140060
2415/* [RW 1] Disable processing further tasks from port 4 (after ending the
2416 current task in process). */
2417#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
2418#define PBF_REG_DISABLE_PF 0x1402e8
2419/* [RW 18] For port 0: For each client that is subject to WFQ (the
2420 * corresponding bit is 1); indicates to which of the credit registers this
2421 * client is mapped. For clients which are not credit blocked; their mapping
2422 * is don't care. */
2423#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 0x15c288
2424/* [RW 9] For port 1: For each client that is subject to WFQ (the
2425 * corresponding bit is 1); indicates to which of the credit registers this
2426 * client is mapped. For clients which are not credit blocked; their mapping
2427 * is don't care. */
2428#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 0x15c28c
2429/* [RW 6] For port 0: Bit per client to indicate if the client competes in
2430 * the strict priority arbiter directly (corresponding bit = 1); or first
2431 * goes to the RR arbiter (corresponding bit = 0); and then competes in the
2432 * lowest priority in the strict-priority arbiter. */
2433#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 0x15c278
2434/* [RW 3] For port 1: Bit per client to indicate if the client competes in
2435 * the strict priority arbiter directly (corresponding bit = 1); or first
2436 * goes to the RR arbiter (corresponding bit = 0); and then competes in the
2437 * lowest priority in the strict-priority arbiter. */
2438#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 0x15c27c
2439/* [RW 6] For port 0: Bit per client to indicate if the client is subject to
2440 * WFQ credit blocking (corresponding bit = 1). */
2441#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 0x15c280
2442/* [RW 3] For port 1: Bit per client to indicate if the client is subject to
2443 * WFQ credit blocking (corresponding bit = 1). */
2444#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 0x15c284
2445/* [RW 16] For port 0: The number of strict priority arbitration slots
2446 * between 2 RR arbitration slots. A value of 0 means no strict priority
2447 * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
2448 * arbiter. */
2449#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 0x15c2a0
2450/* [RW 16] For port 1: The number of strict priority arbitration slots
2451 * between 2 RR arbitration slots. A value of 0 means no strict priority
2452 * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
2453 * arbiter. */
2454#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 0x15c2a4
2455/* [RW 18] For port 0: Indicates which client is connected to each priority
2456 * in the strict-priority arbiter. Priority 0 is the highest priority, and
2457 * priority 5 is the lowest, to which the RR output is connected (this is
2458 * not configurable). */
2459#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 0x15c270
2460/* [RW 9] For port 1: Indicates which client is connected to each priority
2461 * in the strict-priority arbiter. Priority 0 is the highest priority, and
2462 * priority 5 is the lowest, to which the RR output is connected (this is
2463 * not configurable). */
2464#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 0x15c274
2465/* [RW 1] Indicates that ETS is performed between the COSes in the command
2466 * arbiter. If reset, strict priority w/ anti-starvation will be performed
2467 * w/o WFQ. */
2468#define PBF_REG_ETS_ENABLED 0x15c050
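/* Illustrative sketch with hypothetical weights: turn on ETS in the PBF
 * command arbiter with equal byte weights and a common upper bound for
 * COS0 and COS1. In the driver the real values come from the ETS/DCBX
 * configuration code. Assumes REG_WR() and struct bnx2x from bnx2x.h. */
static inline void bnx2x_pbf_ets_sketch(struct bnx2x *bp)
{
	REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);	/* 10000 bytes */
	REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
	REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);	/* 10M bytes */
	REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
	REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
}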
2469/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
2470 * Ethernet header. */
2471#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
2472/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
2473#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8
2474/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest
2475 * priority in the command arbiter. */
2476#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
2477#define PBF_REG_IF_ENABLE_REG 0x140044
2478/* [RW 1] Init bit. When set the initial credits are copied to the credit
2479 registers (except the port credits). Should be set and then reset after
2480 the configuration of the block has ended. */
2481#define PBF_REG_INIT 0x140000
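/* Illustrative sketch: the set-then-clear init pulse described above,
 * issued after the block configuration is complete so the initial credits
 * are latched into the credit registers. Assumes REG_WR() and struct
 * bnx2x from bnx2x.h. */
static inline void bnx2x_pbf_init_pulse(struct bnx2x *bp)
{
	REG_WR(bp, PBF_REG_INIT, 1);	/* copy initial credits */
	REG_WR(bp, PBF_REG_INIT, 0);	/* release init */
}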
2482/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte
2483 * lines. */
2484#define PBF_REG_INIT_CRD_LB_Q 0x15c248
2485/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte
2486 * lines. */
2487#define PBF_REG_INIT_CRD_Q0 0x15c230
2488/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte
2489 * lines. */
2490#define PBF_REG_INIT_CRD_Q1 0x15c234
2491/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
2492 copied to the credit register. Should be set and then reset after the
2493 configuration of the port has ended. */
2494#define PBF_REG_INIT_P0 0x140004
2495/* [RW 1] Init bit for port 1. When set the initial credit of port 1 is
2496 copied to the credit register. Should be set and then reset after the
2497 configuration of the port has ended. */
2498#define PBF_REG_INIT_P1 0x140008
2499/* [RW 1] Init bit for port 4. When set the initial credit of port 4 is
2500 copied to the credit register. Should be set and then reset after the
2501 configuration of the port has ended. */
2502#define PBF_REG_INIT_P4 0x14000c
2503/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
2504 * the LB queue. Reset upon init. */
2505#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354
2506/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
2507 * queue 0. Reset upon init. */
2508#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358
2509/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
2510 * queue 1. Reset upon init. */
2511#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c
2512/* [RW 1] Enable for mac interface 0. */
2513#define PBF_REG_MAC_IF0_ENABLE 0x140030
2514/* [RW 1] Enable for mac interface 1. */
2515#define PBF_REG_MAC_IF1_ENABLE 0x140034
2516/* [RW 1] Enable for the loopback interface. */
2517#define PBF_REG_MAC_LB_ENABLE 0x140040
2518/* [RW 6] Bit-map indicating which headers must appear in the packet */
2519#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
2520/* [RW 16] The number of strict priority arbitration slots between 2 RR
2521 * arbitration slots. A value of 0 means no strict priority cycles; i.e. the
2522 * strict-priority w/ anti-starvation arbiter is a RR arbiter. */
2523#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064
2524/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
2525 is not supported. */
2526#define PBF_REG_P0_ARB_THRSH 0x1400e4
2527/* [R 11] Current credit for port 0 in the tx port buffers in 16 byte lines. */
2528#define PBF_REG_P0_CREDIT 0x140200
2529/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
2530 lines. */
2531#define PBF_REG_P0_INIT_CRD 0x1400d0
2532/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
2533 * port 0. Reset upon init. */
2534#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308
2535/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */
2536#define PBF_REG_P0_PAUSE_ENABLE 0x140014
2537/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */
2538#define PBF_REG_P0_TASK_CNT 0x140204
2539/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
2540 * freed from the task queue of port 0. Reset upon init. */
2541#define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0
2542/* [R 12] Number of 8 byte lines occupied in the task queue of port 0. */
2543#define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc
2544/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port
2545 * buffers in 16 byte lines. */
2546#define PBF_REG_P1_CREDIT 0x140208
2547/* [R 11] Removed for E3 B0 - Initial credit for port 1 in the tx port
2548 * buffers in 16 byte lines. */
2549#define PBF_REG_P1_INIT_CRD 0x1400d4
2550/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
2551 * port 1. Reset upon init. */
2552#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c
2553/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */
2554#define PBF_REG_P1_TASK_CNT 0x14020c
2555/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
2556 * freed from the task queue of port 1. Reset upon init. */
2557#define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4
2558/* [R 12] Number of 8 byte lines occupied in the task queue of port 1. */
2559#define PBF_REG_P1_TQ_OCCUPANCY 0x140300
2560/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */
2561#define PBF_REG_P4_CREDIT 0x140210
2562/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
2563 lines. */
2564#define PBF_REG_P4_INIT_CRD 0x1400e0
2565/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
2566 * port 4. Reset upon init. */
2567#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310
2568/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */
2569#define PBF_REG_P4_TASK_CNT 0x140214
2570/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
2571 * freed from the task queue of port 4. Reset upon init. */
2572#define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8
2573/* [R 12] Number of 8 byte lines occupied in the task queue of port 4. */
2574#define PBF_REG_P4_TQ_OCCUPANCY 0x140304
2575/* [RW 5] Interrupt mask register #0 read/write */
2576#define PBF_REG_PBF_INT_MASK 0x1401d4
2577/* [R 5] Interrupt register #0 read */
2578#define PBF_REG_PBF_INT_STS 0x1401c8
2579/* [RW 20] Parity mask register #0 read/write */
2580#define PBF_REG_PBF_PRTY_MASK 0x1401e4
2581/* [RC 20] Parity register #0 read clear */
2582#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
2583/* [RW 16] The Ethernet type value for L2 tag 0 */
2584#define PBF_REG_TAG_ETHERTYPE_0 0x15c090
2585/* [RW 4] The length of the info field for L2 tag 0. The length is between
2586 * 2B and 14B; in 2B granularity */
2587#define PBF_REG_TAG_LEN_0 0x15c09c
2588/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task
2589 * queue. Reset upon init. */
2590#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c
2591/* [R 32] Cyclic counter for number of 8 byte lines freed from the task
2592 * queue 0. Reset upon init. */
2593#define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390
2594/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1.
2595 * Reset upon init. */
2596#define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394
2597/* [R 13] Number of 8 byte lines occupied in the task queue of the LB
2598 * queue. */
2599#define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8
2600/* [R 13] Number of 8 byte lines occupied in the task queue of queue 0. */
2601#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac
2602/* [R 13] Number of 8 byte lines occupied in the task queue of queue 1. */
2603#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0
2604#define PB_REG_CONTROL 0
2605/* [RW 2] Interrupt mask register #0 read/write */
2606#define PB_REG_PB_INT_MASK 0x28
2607/* [R 2] Interrupt register #0 read */
2608#define PB_REG_PB_INT_STS 0x1c
2609/* [RW 4] Parity mask register #0 read/write */
2610#define PB_REG_PB_PRTY_MASK 0x38
2611/* [R 4] Parity register #0 read */
2612#define PB_REG_PB_PRTY_STS 0x2c
2613/* [RC 4] Parity register #0 read clear */
2614#define PB_REG_PB_PRTY_STS_CLR 0x30
2615#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
2616#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
2617#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
2618#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1<<6)
2619#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1<<7)
2620#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1<<4)
2621#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1<<3)
2622#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1<<5)
2623#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1<<2)
2624/* [R 8] Config space A attention dirty bits. Each bit indicates that the
2625 * corresponding PF generates config space A attention. Set by PXP. Reset by
2626 * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits
2627 * from both paths. */
2628#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010
2629/* [R 8] Config space B attention dirty bits. Each bit indicates that the
2630 * corresponding PF generates config space B attention. Set by PXP. Reset by
2631 * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits
2632 * from both paths. */
2633#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014
2634/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1
2635 * - enable. */
2636#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE 0x9194
2637/* [RW 18] Type B VF inbound interrupt table for CSDM: bits[17:9]-mask;
2638 * bits[8:0]-address. Bits [1:0] must be zero (DW resolution address). */
2639#define PGLUE_B_REG_CSDM_INB_INT_B_VF 0x916c
2640/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1
2641 * - enable. */
2642#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE 0x919c
2643/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */
2644#define PGLUE_B_REG_CSDM_START_OFFSET_A 0x9100
2645/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */
2646#define PGLUE_B_REG_CSDM_START_OFFSET_B 0x9108
2647/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */
2648#define PGLUE_B_REG_CSDM_VF_SHIFT_B 0x9110
2649/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2650#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF 0x91ac
2651/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates
2652 * that the FLR register of the corresponding PF was set. Set by PXP. Reset
2653 * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits
2654 * from both paths. */
2655#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028
2656/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1
2657 * to a bit in this register in order to clear the corresponding bit in
2658 * flr_request_pf_7_0 register. Note: register contains bits from both
2659 * paths. */
2660#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418
2661/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit
2662 * indicates that the FLR register of the corresponding VF was set. Set by
2663 * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */
2664#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024
2665/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit
2666 * indicates that the FLR register of the corresponding VF was set. Set by
2667 * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */
2668#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018
2669/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit
2670 * indicates that the FLR register of the corresponding VF was set. Set by
2671 * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */
2672#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c
2673/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit
2674 * indicates that the FLR register of the corresponding VF was set. Set by
2675 * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */
2676#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020
2677/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit
2678 * 0 - Target memory read arrived with a correctable error. Bit 1 - Target
2679 * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW
2680 * arrived with a correctable error. Bit 3 - Configuration RW arrived with
2681 * an uncorrectable error. Bit 4 - Completion with Configuration Request
2682 * Retry Status. Bit 5 - Expansion ROM access received with a write request.
2683 * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and
2684 * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010;
2685 * and pcie_rx_last not asserted. */
2686#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068
2687#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c
2688#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
2689#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
2690#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
2691/* [R 9] Interrupt register #0 read */
2692#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
2693/* [RC 9] Interrupt register #0 read clear */
2694#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c
2695/* [RW 2] Parity mask register #0 read/write */
2696#define PGLUE_B_REG_PGLUE_B_PRTY_MASK 0x92b4
2697/* [R 2] Parity register #0 read */
2698#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8
2699/* [RC 2] Parity register #0 read clear */
2700#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR 0x92ac
2701/* [R 13] Details of first request received with error. [2:0] - PFID. [3] -
2702 * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion
2703 * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 -
2704 * completer abort. 3 - Illegal value for this field. [12] valid - indicates
2705 * if there was a completion error since the last time this register was
2706 * cleared. */
2707#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080
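/* Illustrative sketch: unpacking the field layout documented above. The
 * helper struct is hypothetical and exists only to name the fields;
 * assumes REG_RD() and struct bnx2x from bnx2x.h. */
struct bnx2x_rx_err_details {
	u8 pfid;	/* [2:0] */
	u8 vf_valid;	/* [3] */
	u8 vfid;	/* [9:4] */
	u8 err_code;	/* [11:10]: 0-cmpl timeout; 1-UR; 2-CA */
	u8 valid;	/* [12]: error seen since last clear */
};

static inline struct bnx2x_rx_err_details
bnx2x_read_rx_err_details(struct bnx2x *bp)
{
	u32 v = REG_RD(bp, PGLUE_B_REG_RX_ERR_DETAILS);
	struct bnx2x_rx_err_details d = {
		.pfid     = v & 0x7,
		.vf_valid = (v >> 3) & 0x1,
		.vfid     = (v >> 4) & 0x3f,
		.err_code = (v >> 10) & 0x3,
		.valid    = (v >> 12) & 0x1,
	};

	return d;
}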
2708/* [R 18] Details of first ATS Translation Completion request received with
2709 * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code -
2710 * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 -
2711 * unsupported request. 2 - completer abort. 3 - Illegal value for this
2712 * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a
2713 * completion error since the last time this register was cleared. */
2714#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084
2715/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to
2716 * a bit in this register in order to clear the corresponding bit in
2717 * shadow_bme_pf_7_0 register. MCP should never use this unless a
2718 * work-around is needed. Note: register contains bits from both paths. */
2719#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458
2720/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the
2721 * VF enable register of the corresponding PF is written to 0 and was
2722 * previously 1. Set by PXP. Reset by MCP writing 1 to
2723 * sr_iov_disabled_request_clr. Note: register contains bits from both
2724 * paths. */
2725#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030
2726/* [R 32] Indicates the status of tags 32-63. 0 - tag is used - read
2727 * completion did not return yet. 1 - tag is unused. Same functionality as
2728 * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */
2729#define PGLUE_B_REG_TAGS_63_32 0x9244
2730/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1
2731 * - enable. */
2732#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE 0x9170
2733/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */
2734#define PGLUE_B_REG_TSDM_START_OFFSET_A 0x90c4
2735/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */
2736#define PGLUE_B_REG_TSDM_START_OFFSET_B 0x90cc
2737/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */
2738#define PGLUE_B_REG_TSDM_VF_SHIFT_B 0x90d4
2739/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2740#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF 0x91a0
2741/* [R 32] Address [31:0] of first read request not submitted due to error */
2742#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098
2743/* [R 32] Address [63:32] of first read request not submitted due to error */
2744#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c
2745/* [R 31] Details of first read request not submitted due to error. [4:0]
2746 * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request.
2747 * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] -
2748 * VFID. */
2749#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0
2750/* [R 26] Details of first read request not submitted due to error. [15:0]
2751 * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
2752 * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
2753 * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
2754 * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
2755 * indicates if there was a request not submitted due to error since the
2756 * last time this register was cleared. */
2757#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4
2758/* [R 32] Address [31:0] of first write request not submitted due to error */
2759#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088
2760/* [R 32] Address [63:32] of first write request not submitted due to error */
2761#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c
2762/* [R 31] Details of first write request not submitted due to error. [4:0]
2763 * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25]
2764 * - VFID. */
2765#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090
2766/* [R 26] Details of first write request not submitted due to error. [15:0]
2767 * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
2768 * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
2769 * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
2770 * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
2771 * indicates if there was a request not submitted due to error since the
2772 * last time this register was cleared. */
2773#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094
2774/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask;
2775 * bits[4:0]-address relative to start_offset_a. Bits [1:0] can have any
2776 * value (Byte resolution address). */
2777#define PGLUE_B_REG_USDM_INB_INT_A_0 0x9128
2778#define PGLUE_B_REG_USDM_INB_INT_A_1 0x912c
2779#define PGLUE_B_REG_USDM_INB_INT_A_2 0x9130
2780#define PGLUE_B_REG_USDM_INB_INT_A_3 0x9134
2781#define PGLUE_B_REG_USDM_INB_INT_A_4 0x9138
2782#define PGLUE_B_REG_USDM_INB_INT_A_5 0x913c
2783#define PGLUE_B_REG_USDM_INB_INT_A_6 0x9140
2784/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1
2785 * - enable. */
2786#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE 0x917c
2787/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1
2788 * - enable. */
2789#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE 0x9180
2790/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1
2791 * - enable. */
2792#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE 0x9184
2793/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */
2794#define PGLUE_B_REG_USDM_START_OFFSET_A 0x90d8
2795/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */
2796#define PGLUE_B_REG_USDM_START_OFFSET_B 0x90e0
2797/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */
2798#define PGLUE_B_REG_USDM_VF_SHIFT_B 0x90e8
2799/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2800#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF 0x91a4
2801/* [R 26] Details of first target VF request accessing VF GRC space that
2802 * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write.
2803 * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a
2804 * request accessing VF GRC space that failed permission check since the
2805 * last time this register was cleared. Permission checks are: function
2806 * permission; R/W permission; address range permission. */
2807#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234
2808/* [R 31] Details of first target VF request with length violation (too many
2809 * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address).
2810 * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30]
2811 * valid - indicates if there was a request with length violation since the
2812 * last time this register was cleared. Length violations: length of more
2813 * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and
2814 * length is more than 1 DW. */
2815#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230
2816/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates
2817 * that there was a completion with uncorrectable error for the
2818 * corresponding PF. Set by PXP. Reset by MCP writing 1 to
2819 * was_error_pf_7_0_clr. */
2820#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c
2821/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1
2822 * to a bit in this register in order to clear the corresponding bit in
2823 * was_error_pf_7_0 register. */
2824#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470
2825/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit
2826 * indicates that there was a completion with uncorrectable error for the
2827 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2828 * was_error_vf_127_96_clr. */
2829#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078
2830/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP
2831 * writes 1 to a bit in this register in order to clear the corresponding
2832 * bit in was_error_vf_127_96 register. */
2833#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474
2834/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit
2835 * indicates that there was a completion with uncorrectable error for the
2836 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2837 * was_error_vf_31_0_clr. */
2838#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c
2839/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes
2840 * 1 to a bit in this register in order to clear the corresponding bit in
2841 * was_error_vf_31_0 register. */
2842#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478
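/* Illustrative sketch: the read/clear handshake for the VF 0-31 was_error
 * dirty bits. Read the dirty mask, then write the same mask to the _CLR
 * register so only the observed bits are cleared. The comments above
 * describe the clear as an MCP action; this is just a sketch of the
 * mechanism. Assumes REG_RD()/REG_WR() and struct bnx2x from bnx2x.h. */
static inline u32 bnx2x_ack_was_error_vf_31_0(struct bnx2x *bp)
{
	u32 dirty = REG_RD(bp, PGLUE_B_REG_WAS_ERROR_VF_31_0);

	if (dirty)
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR, dirty);
	return dirty;
}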
2843/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit
2844 * indicates that there was a completion with uncorrectable error for the
2845 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2846 * was_error_vf_63_32_clr. */
2847#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070
2848/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes
2849 * 1 to a bit in this register in order to clear the corresponding bit in
2850 * was_error_vf_63_32 register. */
2851#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c
2852/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit
2853 * indicates that there was a completion with uncorrectable error for the
2854 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2855 * was_error_vf_95_64_clr. */
2856#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074
2857/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes
2858 * 1 to a bit in this register in order to clear the corresponding bit in
2859 * was_error_vf_95_64 register. */
2860#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480
2861/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1
2862 * - enable. */
2863#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE 0x9188
2864/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */
2865#define PGLUE_B_REG_XSDM_START_OFFSET_A 0x90ec
2866/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */
2867#define PGLUE_B_REG_XSDM_START_OFFSET_B 0x90f4
2868/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */
2869#define PGLUE_B_REG_XSDM_VF_SHIFT_B 0x90fc
2870/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2871#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF 0x91a8
2872#define PRS_REG_A_PRSU_20 0x40134
2873/* [R 8] debug only: CFC load request current credit. Transaction based. */
2874#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164
2875/* [R 8] debug only: CFC search request current credit. Transaction based. */
2876#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT 0x40168
2877/* [RW 6] The initial credit for the search message to the CFC interface.
2878 Credit is transaction based. */
2879#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c
2880/* [RW 24] CID for port 0 if no match */
2881#define PRS_REG_CID_PORT_0 0x400fc
2882/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
2883 load response is reset and packet type is 0. Used in packet start message
2884 to TCM. */
2885#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0 0x400dc
2886#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1 0x400e0
2887#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2 0x400e4
2888#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3 0x400e8
2889#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4 0x400ec
2890#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5 0x400f0
2891/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
2892 load response is set and packet type is 0. Used in packet start message
2893 to TCM. */
2894#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0 0x400bc
2895#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1 0x400c0
2896#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2 0x400c4
2897#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3 0x400c8
2898#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4 0x400cc
2899#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5 0x400d0
2900/* [RW 32] The CM header for a match and packet type 1 for loopback port.
2901 Used in packet start message to TCM. */
2902#define PRS_REG_CM_HDR_LOOPBACK_TYPE_1 0x4009c
2903#define PRS_REG_CM_HDR_LOOPBACK_TYPE_2 0x400a0
2904#define PRS_REG_CM_HDR_LOOPBACK_TYPE_3 0x400a4
2905#define PRS_REG_CM_HDR_LOOPBACK_TYPE_4 0x400a8
2906/* [RW 32] The CM header for a match and packet type 0. Used in packet start
2907 message to TCM. */
2908#define PRS_REG_CM_HDR_TYPE_0 0x40078
2909#define PRS_REG_CM_HDR_TYPE_1 0x4007c
2910#define PRS_REG_CM_HDR_TYPE_2 0x40080
2911#define PRS_REG_CM_HDR_TYPE_3 0x40084
2912#define PRS_REG_CM_HDR_TYPE_4 0x40088
2913/* [RW 32] The CM header in case there was not a match on the connection */
2914#define PRS_REG_CM_NO_MATCH_HDR 0x400b8
2915/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */
2916#define PRS_REG_E1HOV_MODE 0x401c8
2917/* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet
2918 start message to TCM. */
2919#define PRS_REG_EVENT_ID_1 0x40054
2920#define PRS_REG_EVENT_ID_2 0x40058
2921#define PRS_REG_EVENT_ID_3 0x4005c
2922/* [RW 16] The Ethernet type value for FCoE */
2923#define PRS_REG_FCOE_TYPE 0x401d0
2924/* [RW 8] Context region for flush packet with packet type 0. Used in CFC
2925 load request message. */
2926#define PRS_REG_FLUSH_REGIONS_TYPE_0 0x40004
2927#define PRS_REG_FLUSH_REGIONS_TYPE_1 0x40008
2928#define PRS_REG_FLUSH_REGIONS_TYPE_2 0x4000c
2929#define PRS_REG_FLUSH_REGIONS_TYPE_3 0x40010
2930#define PRS_REG_FLUSH_REGIONS_TYPE_4 0x40014
2931#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018
2932#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c
2933#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020
2934/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
2935 * Ethernet header. */
2936#define PRS_REG_HDRS_AFTER_BASIC 0x40238
2937/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
2938 * Ethernet header for port 0 packets. */
2939#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270
2940#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290
2941/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
2942#define PRS_REG_HDRS_AFTER_TAG_0 0x40248
2943/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for
2944 * port 0 packets */
2945#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280
2946#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0
2947/* [RW 4] The increment value to send in the CFC load request message */
2948#define PRS_REG_INC_VALUE 0x40048
2949/* [RW 6] Bit-map indicating which headers must appear in the packet */
2950#define PRS_REG_MUST_HAVE_HDRS 0x40254
2951/* [RW 6] Bit-map indicating which headers must appear in the packet for
2952 * port 0 packets */
2953#define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c
2954#define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac
2955#define PRS_REG_NIC_MODE 0x40138
2956/* [RW 8] The 8-bit event ID for cases where there is no match on the
2957 connection. Used in packet start message to TCM. */
2958#define PRS_REG_NO_MATCH_EVENT_ID 0x40070
2959/* [ST 24] The number of input CFC flush packets */
2960#define PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES 0x40128
2961/* [ST 32] The number of cycles the Parser halted its operation because it
2962 could not allocate the next serial number */
2963#define PRS_REG_NUM_OF_DEAD_CYCLES 0x40130
2964/* [ST 24] The number of input packets */
2965#define PRS_REG_NUM_OF_PACKETS 0x40124
2966/* [ST 24] The number of input transparent flush packets */
2967#define PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES 0x4012c
2968/* [RW 8] Context region for received Ethernet packet with a match and
2969 packet type 0. Used in CFC load request message */
2970#define PRS_REG_PACKET_REGIONS_TYPE_0 0x40028
2971#define PRS_REG_PACKET_REGIONS_TYPE_1 0x4002c
2972#define PRS_REG_PACKET_REGIONS_TYPE_2 0x40030
2973#define PRS_REG_PACKET_REGIONS_TYPE_3 0x40034
2974#define PRS_REG_PACKET_REGIONS_TYPE_4 0x40038
2975#define PRS_REG_PACKET_REGIONS_TYPE_5 0x4003c
2976#define PRS_REG_PACKET_REGIONS_TYPE_6 0x40040
2977#define PRS_REG_PACKET_REGIONS_TYPE_7 0x40044
2978/* [R 2] debug only: Number of pending requests for CAC on port 0. */
2979#define PRS_REG_PENDING_BRB_CAC0_RQ 0x40174
2980/* [R 2] debug only: Number of pending requests for header parsing. */
2981#define PRS_REG_PENDING_BRB_PRS_RQ 0x40170
2982/* [R 1] Interrupt register #0 read */
2983#define PRS_REG_PRS_INT_STS 0x40188
2984/* [RW 8] Parity mask register #0 read/write */
2985#define PRS_REG_PRS_PRTY_MASK 0x401a4
2986/* [R 8] Parity register #0 read */
2987#define PRS_REG_PRS_PRTY_STS 0x40198
2988/* [RC 8] Parity register #0 read clear */
2989#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
2990/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
2991 request message */
2992#define PRS_REG_PURE_REGIONS 0x40024
2993/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this
2994 serial number was released by SDM but cannot be used because a previous
2995 serial number was not released. */
2996#define PRS_REG_SERIAL_NUM_STATUS_LSB 0x40154
2997/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this
2998 serial number was released by SDM but cannot be used because a previous
2999 serial number was not released. */
3000#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158
3001/* [R 4] debug only: SRC current credit. Transaction based. */
3002#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c
3003/* [RW 16] The Ethernet type value for L2 tag 0 */
3004#define PRS_REG_TAG_ETHERTYPE_0 0x401d4
3005/* [RW 4] The length of the info field for L2 tag 0. The length is between
3006 * 2B and 14B; in 2B granularity */
3007#define PRS_REG_TAG_LEN_0 0x4022c
3008/* [R 8] debug only: TCM current credit. Cycle based. */
3009#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
3010/* [R 8] debug only: TSDM current credit. Transaction based. */
3011#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
3012#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19)
3013#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20)
3014#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22)
3015#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1<<23)
3016#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1<<24)
3017#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
3018#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
3019/* [R 6] Debug only: Number of used entries in the data FIFO */
3020#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
3021/* [R 7] Debug only: Number of used entries in the header FIFO */
3022#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
3023#define PXP2_REG_PGL_ADDR_88_F0 0x120534
3024/* [R 32] GRC address for configuration access to PCIE config address 0x88.
3025 * Any write to this PCIE address will cause a GRC write access to the
3026 * address that's in this register */
3027#define PXP2_REG_PGL_ADDR_88_F1 0x120544
3028#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
3029/* [R 32] GRC address for configuration access to PCIE config address 0x8c.
3030 * Any write to this PCIE address will cause a GRC write access to the
3031 * address that's in this register */
3032#define PXP2_REG_PGL_ADDR_8C_F1 0x120548
3033#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
3034/* [R 32] GRC address for configuration access to PCIE config address 0x90.
3035 * Any write to this PCIE address will cause a GRC write access to the
3036 * address that's in this register */
3037#define PXP2_REG_PGL_ADDR_90_F1 0x12054c
3038#define PXP2_REG_PGL_ADDR_94_F0 0x120540
3039/* [R 32] GRC address for configuration access to PCIE config address 0x94.
3040 * Any write to this PCIE address will cause a GRC write access to the
3041 * address that's in this register */
3042#define PXP2_REG_PGL_ADDR_94_F1 0x120550
3043#define PXP2_REG_PGL_CONTROL0 0x120490
3044#define PXP2_REG_PGL_CONTROL1 0x120514
3045#define PXP2_REG_PGL_DEBUG 0x120520
3046/* [RW 32] Third dword data of expansion rom request. This register is
3047 special. Reading from it provides a vector of outstanding read requests. If
3048 a bit is zero it means that a read request on the corresponding tag did
3049 not finish yet (not all completions have arrived for it). */
3050#define PXP2_REG_PGL_EXP_ROM2 0x120808
3051/* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask;
3052 bits[15:0]-address */
3053#define PXP2_REG_PGL_INT_CSDM_0 0x1204f4
3054#define PXP2_REG_PGL_INT_CSDM_1 0x1204f8
3055#define PXP2_REG_PGL_INT_CSDM_2 0x1204fc
3056#define PXP2_REG_PGL_INT_CSDM_3 0x120500
3057#define PXP2_REG_PGL_INT_CSDM_4 0x120504
3058#define PXP2_REG_PGL_INT_CSDM_5 0x120508
3059#define PXP2_REG_PGL_INT_CSDM_6 0x12050c
3060#define PXP2_REG_PGL_INT_CSDM_7 0x120510
3061/* [RW 32] Inbound interrupt table for TSDM: bits[31:16]-mask;
3062 bits[15:0]-address */
3063#define PXP2_REG_PGL_INT_TSDM_0 0x120494
3064#define PXP2_REG_PGL_INT_TSDM_1 0x120498
3065#define PXP2_REG_PGL_INT_TSDM_2 0x12049c
3066#define PXP2_REG_PGL_INT_TSDM_3 0x1204a0
3067#define PXP2_REG_PGL_INT_TSDM_4 0x1204a4
3068#define PXP2_REG_PGL_INT_TSDM_5 0x1204a8
3069#define PXP2_REG_PGL_INT_TSDM_6 0x1204ac
3070#define PXP2_REG_PGL_INT_TSDM_7 0x1204b0
3071/* [RW 32] Inbound interrupt table for USDM: bits[31:16]-mask;
3072 bits[15:0]-address */
3073#define PXP2_REG_PGL_INT_USDM_0 0x1204b4
3074#define PXP2_REG_PGL_INT_USDM_1 0x1204b8
3075#define PXP2_REG_PGL_INT_USDM_2 0x1204bc
3076#define PXP2_REG_PGL_INT_USDM_3 0x1204c0
3077#define PXP2_REG_PGL_INT_USDM_4 0x1204c4
3078#define PXP2_REG_PGL_INT_USDM_5 0x1204c8
3079#define PXP2_REG_PGL_INT_USDM_6 0x1204cc
3080#define PXP2_REG_PGL_INT_USDM_7 0x1204d0
3081/* [RW 32] Inbound interrupt table for XSDM: bits[31:16]-mask;
3082 bits[15:0]-address */
3083#define PXP2_REG_PGL_INT_XSDM_0 0x1204d4
3084#define PXP2_REG_PGL_INT_XSDM_1 0x1204d8
3085#define PXP2_REG_PGL_INT_XSDM_2 0x1204dc
3086#define PXP2_REG_PGL_INT_XSDM_3 0x1204e0
3087#define PXP2_REG_PGL_INT_XSDM_4 0x1204e4
3088#define PXP2_REG_PGL_INT_XSDM_5 0x1204e8
3089#define PXP2_REG_PGL_INT_XSDM_6 0x1204ec
3090#define PXP2_REG_PGL_INT_XSDM_7 0x1204f0
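/* Illustrative sketch (not part of the original register map): composing one
 * entry for the PXP2_REG_PGL_INT_{C,T,U,X}SDM_* tables above, which pack a
 * 16-bit mask into bits[31:16] and a 16-bit address into bits[15:0].
 */
static inline u32 pgl_int_entry(u16 mask, u16 addr)
{
	return ((u32)mask << 16) | addr;
}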
3091/* [RW 3] this field allows one function to pretend to be another function
3092 when accessing any BAR mapped resource within the device. the value of
3093 the field is the number of the function that will effectively be
3094 accessed. after software writes to this field it must read it back in
3095 order to know that the new value has been updated */
3096#define PXP2_REG_PGL_PRETEND_FUNC_F0 0x120674
3097#define PXP2_REG_PGL_PRETEND_FUNC_F1 0x120678
3098#define PXP2_REG_PGL_PRETEND_FUNC_F2 0x12067c
3099#define PXP2_REG_PGL_PRETEND_FUNC_F3 0x120680
3100#define PXP2_REG_PGL_PRETEND_FUNC_F4 0x120684
3101#define PXP2_REG_PGL_PRETEND_FUNC_F5 0x120688
3102#define PXP2_REG_PGL_PRETEND_FUNC_F6 0x12068c
3103#define PXP2_REG_PGL_PRETEND_FUNC_F7 0x120690
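/* Illustrative sketch (not part of the original register map): using the
 * pretend mechanism described above. The read-back implements the documented
 * requirement that software read the field after writing it, before relying
 * on the new value. The bar pointer and the choice of the F0 copy are
 * assumptions of this sketch; each PCI function has its own copy.
 */
static inline void pgl_pretend_func_sketch(void __iomem *bar, u32 func_num)
{
	writel(func_num, bar + PXP2_REG_PGL_PRETEND_FUNC_F0);
	(void)readl(bar + PXP2_REG_PGL_PRETEND_FUNC_F0); /* confirm update */
}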
3104/* [R 1] this bit indicates that a read request was blocked because
3105 bus_master_en was deasserted */
3106#define PXP2_REG_PGL_READ_BLOCKED 0x120568
3107#define PXP2_REG_PGL_TAGS_LIMIT 0x1205a8
3108/* [R 18] debug only */
3109#define PXP2_REG_PGL_TXW_CDTS 0x12052c
3110/* [R 1] this bit indicates that a write request was blocked because
3111 bus_master_en was deasserted */
3112#define PXP2_REG_PGL_WRITE_BLOCKED 0x120564
3113#define PXP2_REG_PSWRQ_BW_ADD1 0x1201c0
3114#define PXP2_REG_PSWRQ_BW_ADD10 0x1201e4
3115#define PXP2_REG_PSWRQ_BW_ADD11 0x1201e8
3116#define PXP2_REG_PSWRQ_BW_ADD2 0x1201c4
3117#define PXP2_REG_PSWRQ_BW_ADD28 0x120228
3118#define PXP2_REG_PSWRQ_BW_ADD3 0x1201c8
3119#define PXP2_REG_PSWRQ_BW_ADD6 0x1201d4
3120#define PXP2_REG_PSWRQ_BW_ADD7 0x1201d8
3121#define PXP2_REG_PSWRQ_BW_ADD8 0x1201dc
3122#define PXP2_REG_PSWRQ_BW_ADD9 0x1201e0
3123#define PXP2_REG_PSWRQ_BW_CREDIT 0x12032c
3124#define PXP2_REG_PSWRQ_BW_L1 0x1202b0
3125#define PXP2_REG_PSWRQ_BW_L10 0x1202d4
3126#define PXP2_REG_PSWRQ_BW_L11 0x1202d8
3127#define PXP2_REG_PSWRQ_BW_L2 0x1202b4
3128#define PXP2_REG_PSWRQ_BW_L28 0x120318
3129#define PXP2_REG_PSWRQ_BW_L3 0x1202b8
3130#define PXP2_REG_PSWRQ_BW_L6 0x1202c4
3131#define PXP2_REG_PSWRQ_BW_L7 0x1202c8
3132#define PXP2_REG_PSWRQ_BW_L8 0x1202cc
3133#define PXP2_REG_PSWRQ_BW_L9 0x1202d0
3134#define PXP2_REG_PSWRQ_BW_RD 0x120324
3135#define PXP2_REG_PSWRQ_BW_UB1 0x120238
3136#define PXP2_REG_PSWRQ_BW_UB10 0x12025c
3137#define PXP2_REG_PSWRQ_BW_UB11 0x120260
3138#define PXP2_REG_PSWRQ_BW_UB2 0x12023c
3139#define PXP2_REG_PSWRQ_BW_UB28 0x1202a0
3140#define PXP2_REG_PSWRQ_BW_UB3 0x120240
3141#define PXP2_REG_PSWRQ_BW_UB6 0x12024c
3142#define PXP2_REG_PSWRQ_BW_UB7 0x120250
3143#define PXP2_REG_PSWRQ_BW_UB8 0x120254
3144#define PXP2_REG_PSWRQ_BW_UB9 0x120258
3145#define PXP2_REG_PSWRQ_BW_WR 0x120328
3146#define PXP2_REG_PSWRQ_CDU0_L2P 0x120000
3147#define PXP2_REG_PSWRQ_QM0_L2P 0x120038
3148#define PXP2_REG_PSWRQ_SRC0_L2P 0x120054
3149#define PXP2_REG_PSWRQ_TM0_L2P 0x12001c
3150#define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0
3151/* [RW 32] Interrupt mask register #0 read/write */
3152#define PXP2_REG_PXP2_INT_MASK_0 0x120578
3153/* [R 32] Interrupt register #0 read */
3154#define PXP2_REG_PXP2_INT_STS_0 0x12056c
3155#define PXP2_REG_PXP2_INT_STS_1 0x120608
3156/* [RC 32] Interrupt register #0 read clear */
3157#define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570
3158/* [RW 32] Parity mask register #0 read/write */
3159#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588
3160#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598
3161/* [R 32] Parity register #0 read */
3162#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
3163#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
3164/* [RC 32] Parity register #0 read clear */
3165#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580
3166#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590
3167/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
3168 indication about backpressure) */
3169#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
3170/* [R 8] Debug only: The blocks counter - number of unused block ids */
3171#define PXP2_REG_RD_BLK_CNT 0x120418
3172/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer.
3173 Must be bigger than 6. Normally should not be changed. */
3174#define PXP2_REG_RD_BLK_NUM_CFG 0x12040c
3175/* [RW 2] CDU byte swapping mode configuration for master read requests */
3176#define PXP2_REG_RD_CDURD_SWAP_MODE 0x120404
3177/* [RW 1] When '1'; inputs to the PSWRD block are ignored */
3178#define PXP2_REG_RD_DISABLE_INPUTS 0x120374
3179/* [R 1] PSWRD internal memories initialization is done */
3180#define PXP2_REG_RD_INIT_DONE 0x120370
3181/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
3182 allocated for vq10 */
3183#define PXP2_REG_RD_MAX_BLKS_VQ10 0x1203a0
3184/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
3185 allocated for vq11 */
3186#define PXP2_REG_RD_MAX_BLKS_VQ11 0x1203a4
3187/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
3188 allocated for vq17 */
3189#define PXP2_REG_RD_MAX_BLKS_VQ17 0x1203bc
3190/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
3191 allocated for vq18 */
3192#define PXP2_REG_RD_MAX_BLKS_VQ18 0x1203c0
3193/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
3194 allocated for vq19 */
3195#define PXP2_REG_RD_MAX_BLKS_VQ19 0x1203c4
3196/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
3197 allocated for vq22 */
3198#define PXP2_REG_RD_MAX_BLKS_VQ22 0x1203d0
3199/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
3200 allocated for vq25 */
3201#define PXP2_REG_RD_MAX_BLKS_VQ25 0x1203dc
3202/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
3203 allocated for vq6 */
3204#define PXP2_REG_RD_MAX_BLKS_VQ6 0x120390
3205/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
3206 allocated for vq9 */
3207#define PXP2_REG_RD_MAX_BLKS_VQ9 0x12039c
3208/* [RW 2] PBF byte swapping mode configuration for master read requests */
3209#define PXP2_REG_RD_PBF_SWAP_MODE 0x1203f4
3210/* [R 1] Debug only: Indication if delivery ports are idle */
3211#define PXP2_REG_RD_PORT_IS_IDLE_0 0x12041c
3212#define PXP2_REG_RD_PORT_IS_IDLE_1 0x120420
3213/* [RW 2] QM byte swapping mode configuration for master read requests */
3214#define PXP2_REG_RD_QM_SWAP_MODE 0x1203f8
3215/* [R 7] Debug only: The SR counter - number of unused sub request ids */
3216#define PXP2_REG_RD_SR_CNT 0x120414
3217/* [RW 2] SRC byte swapping mode configuration for master read requests */
3218#define PXP2_REG_RD_SRC_SWAP_MODE 0x120400
3219/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must
3220 be bigger than 1. Normally should not be changed. */
3221#define PXP2_REG_RD_SR_NUM_CFG 0x120408
3222/* [RW 1] Signals the PSWRD block to start initializing internal memories */
3223#define PXP2_REG_RD_START_INIT 0x12036c
3224/* [RW 2] TM byte swapping mode configuration for master read requests */
3225#define PXP2_REG_RD_TM_SWAP_MODE 0x1203fc
3226/* [RW 10] Bandwidth addition to VQ0 write requests */
3227#define PXP2_REG_RQ_BW_RD_ADD0 0x1201bc
3228/* [RW 10] Bandwidth addition to VQ12 read requests */
3229#define PXP2_REG_RQ_BW_RD_ADD12 0x1201ec
3230/* [RW 10] Bandwidth addition to VQ13 read requests */
3231#define PXP2_REG_RQ_BW_RD_ADD13 0x1201f0
3232/* [RW 10] Bandwidth addition to VQ14 read requests */
3233#define PXP2_REG_RQ_BW_RD_ADD14 0x1201f4
3234/* [RW 10] Bandwidth addition to VQ15 read requests */
3235#define PXP2_REG_RQ_BW_RD_ADD15 0x1201f8
3236/* [RW 10] Bandwidth addition to VQ16 read requests */
3237#define PXP2_REG_RQ_BW_RD_ADD16 0x1201fc
3238/* [RW 10] Bandwidth addition to VQ17 read requests */
3239#define PXP2_REG_RQ_BW_RD_ADD17 0x120200
3240/* [RW 10] Bandwidth addition to VQ18 read requests */
3241#define PXP2_REG_RQ_BW_RD_ADD18 0x120204
3242/* [RW 10] Bandwidth addition to VQ19 read requests */
3243#define PXP2_REG_RQ_BW_RD_ADD19 0x120208
3244/* [RW 10] Bandwidth addition to VQ20 read requests */
3245#define PXP2_REG_RQ_BW_RD_ADD20 0x12020c
3246/* [RW 10] Bandwidth addition to VQ22 read requests */
3247#define PXP2_REG_RQ_BW_RD_ADD22 0x120210
3248/* [RW 10] Bandwidth addition to VQ23 read requests */
3249#define PXP2_REG_RQ_BW_RD_ADD23 0x120214
3250/* [RW 10] Bandwidth addition to VQ24 read requests */
3251#define PXP2_REG_RQ_BW_RD_ADD24 0x120218
3252/* [RW 10] Bandwidth addition to VQ25 read requests */
3253#define PXP2_REG_RQ_BW_RD_ADD25 0x12021c
3254/* [RW 10] Bandwidth addition to VQ26 read requests */
3255#define PXP2_REG_RQ_BW_RD_ADD26 0x120220
3256/* [RW 10] Bandwidth addition to VQ27 read requests */
3257#define PXP2_REG_RQ_BW_RD_ADD27 0x120224
3258/* [RW 10] Bandwidth addition to VQ4 read requests */
3259#define PXP2_REG_RQ_BW_RD_ADD4 0x1201cc
3260/* [RW 10] Bandwidth addition to VQ5 read requests */
3261#define PXP2_REG_RQ_BW_RD_ADD5 0x1201d0
3262/* [RW 10] Bandwidth Typical L for VQ0 Read requests */
3263#define PXP2_REG_RQ_BW_RD_L0 0x1202ac
3264/* [RW 10] Bandwidth Typical L for VQ12 Read requests */
3265#define PXP2_REG_RQ_BW_RD_L12 0x1202dc
3266/* [RW 10] Bandwidth Typical L for VQ13 Read requests */
3267#define PXP2_REG_RQ_BW_RD_L13 0x1202e0
3268/* [RW 10] Bandwidth Typical L for VQ14 Read requests */
3269#define PXP2_REG_RQ_BW_RD_L14 0x1202e4
3270/* [RW 10] Bandwidth Typical L for VQ15 Read requests */
3271#define PXP2_REG_RQ_BW_RD_L15 0x1202e8
3272/* [RW 10] Bandwidth Typical L for VQ16 Read requests */
3273#define PXP2_REG_RQ_BW_RD_L16 0x1202ec
3274/* [RW 10] Bandwidth Typical L for VQ17 Read requests */
3275#define PXP2_REG_RQ_BW_RD_L17 0x1202f0
3276/* [RW 10] Bandwidth Typical L for VQ18 Read requests */
3277#define PXP2_REG_RQ_BW_RD_L18 0x1202f4
3278/* [RW 10] Bandwidth Typical L for VQ19 Read requests */
3279#define PXP2_REG_RQ_BW_RD_L19 0x1202f8
3280/* [RW 10] Bandwidth Typical L for VQ20 Read requests */
3281#define PXP2_REG_RQ_BW_RD_L20 0x1202fc
3282/* [RW 10] Bandwidth Typical L for VQ22 Read requests */
3283#define PXP2_REG_RQ_BW_RD_L22 0x120300
3284/* [RW 10] Bandwidth Typical L for VQ23 Read requests */
3285#define PXP2_REG_RQ_BW_RD_L23 0x120304
3286/* [RW 10] Bandwidth Typical L for VQ24 Read requests */
3287#define PXP2_REG_RQ_BW_RD_L24 0x120308
3288/* [RW 10] Bandwidth Typical L for VQ25 Read requests */
3289#define PXP2_REG_RQ_BW_RD_L25 0x12030c
3290/* [RW 10] Bandwidth Typical L for VQ26 Read requests */
3291#define PXP2_REG_RQ_BW_RD_L26 0x120310
3292/* [RW 10] Bandwidth Typical L for VQ27 Read requests */
3293#define PXP2_REG_RQ_BW_RD_L27 0x120314
3294/* [RW 10] Bandwidth Typical L for VQ4 Read requests */
3295#define PXP2_REG_RQ_BW_RD_L4 0x1202bc
3296/* [RW 10] Bandwidth Typical L for VQ5 Read- currently not used */
3297#define PXP2_REG_RQ_BW_RD_L5 0x1202c0
3298/* [RW 7] Bandwidth upper bound for VQ0 read requests */
3299#define PXP2_REG_RQ_BW_RD_UBOUND0 0x120234
3300/* [RW 7] Bandwidth upper bound for VQ12 read requests */
3301#define PXP2_REG_RQ_BW_RD_UBOUND12 0x120264
3302/* [RW 7] Bandwidth upper bound for VQ13 read requests */
3303#define PXP2_REG_RQ_BW_RD_UBOUND13 0x120268
3304/* [RW 7] Bandwidth upper bound for VQ14 read requests */
3305#define PXP2_REG_RQ_BW_RD_UBOUND14 0x12026c
3306/* [RW 7] Bandwidth upper bound for VQ15 read requests */
3307#define PXP2_REG_RQ_BW_RD_UBOUND15 0x120270
3308/* [RW 7] Bandwidth upper bound for VQ16 read requests */
3309#define PXP2_REG_RQ_BW_RD_UBOUND16 0x120274
3310/* [RW 7] Bandwidth upper bound for VQ17 read requests */
3311#define PXP2_REG_RQ_BW_RD_UBOUND17 0x120278
3312/* [RW 7] Bandwidth upper bound for VQ18 read requests */
3313#define PXP2_REG_RQ_BW_RD_UBOUND18 0x12027c
3314/* [RW 7] Bandwidth upper bound for VQ19 read requests */
3315#define PXP2_REG_RQ_BW_RD_UBOUND19 0x120280
3316/* [RW 7] Bandwidth upper bound for VQ20 read requests */
3317#define PXP2_REG_RQ_BW_RD_UBOUND20 0x120284
3318/* [RW 7] Bandwidth upper bound for VQ22 read requests */
3319#define PXP2_REG_RQ_BW_RD_UBOUND22 0x120288
3320/* [RW 7] Bandwidth upper bound for VQ23 read requests */
3321#define PXP2_REG_RQ_BW_RD_UBOUND23 0x12028c
3322/* [RW 7] Bandwidth upper bound for VQ24 read requests */
3323#define PXP2_REG_RQ_BW_RD_UBOUND24 0x120290
3324/* [RW 7] Bandwidth upper bound for VQ25 read requests */
3325#define PXP2_REG_RQ_BW_RD_UBOUND25 0x120294
3326/* [RW 7] Bandwidth upper bound for VQ26 read requests */
3327#define PXP2_REG_RQ_BW_RD_UBOUND26 0x120298
3328/* [RW 7] Bandwidth upper bound for VQ27 read requests */
3329#define PXP2_REG_RQ_BW_RD_UBOUND27 0x12029c
3330/* [RW 7] Bandwidth upper bound for VQ4 read requests */
3331#define PXP2_REG_RQ_BW_RD_UBOUND4 0x120244
3332/* [RW 7] Bandwidth upper bound for VQ5 read requests */
3333#define PXP2_REG_RQ_BW_RD_UBOUND5 0x120248
3334/* [RW 10] Bandwidth addition to VQ29 write requests */
3335#define PXP2_REG_RQ_BW_WR_ADD29 0x12022c
3336/* [RW 10] Bandwidth addition to VQ30 write requests */
3337#define PXP2_REG_RQ_BW_WR_ADD30 0x120230
3338/* [RW 10] Bandwidth Typical L for VQ29 Write requests */
3339#define PXP2_REG_RQ_BW_WR_L29 0x12031c
3340/* [RW 10] Bandwidth Typical L for VQ30 Write requests */
3341#define PXP2_REG_RQ_BW_WR_L30 0x120320
3342/* [RW 7] Bandwidth upper bound for VQ29 */
3343#define PXP2_REG_RQ_BW_WR_UBOUND29 0x1202a4
3344/* [RW 7] Bandwidth upper bound for VQ30 */
3345#define PXP2_REG_RQ_BW_WR_UBOUND30 0x1202a8
3346/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */
3347#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR 0x120008
3348/* [RW 2] Endian mode for cdu */
3349#define PXP2_REG_RQ_CDU_ENDIAN_M 0x1201a0
3350#define PXP2_REG_RQ_CDU_FIRST_ILT 0x12061c
3351#define PXP2_REG_RQ_CDU_LAST_ILT 0x120620
3352/* [RW 3] page size in L2P table for CDU module; 0-4k; 1-8k; 2-16k; 3-32k;
3353 4-64k; 5-128k */
3354#define PXP2_REG_RQ_CDU_P_SIZE 0x120018
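/* Illustrative sketch (not part of the original register map): deriving the
 * 3-bit page-size code for the PXP2_REG_RQ_*_P_SIZE registers above. The
 * sequential encoding (4k -> 0, 8k -> 1, ... 128k -> 5) is an assumption
 * inferred from the comment; page_bytes must be a power of two in range.
 */
static inline u32 rq_p_size_code(u32 page_bytes)
{
	u32 code = 0;

	while ((4096u << code) < page_bytes)
		code++;
	return code;
}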
3355/* [R 1] '1' indicates that the requester has finished its internal
3356 configuration */
3357#define PXP2_REG_RQ_CFG_DONE 0x1201b4
3358/* [RW 2] Endian mode for debug */
3359#define PXP2_REG_RQ_DBG_ENDIAN_M 0x1201a4
3360/* [RW 1] When '1'; requests will enter input buffers but won't get out
3361 towards the glue */
3362#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330
3363/* [RW 4] Determines alignment of write SRs when a request is split into
3364 * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
3365 * aligned. 4 - 512B aligned. */
3366#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0
3367/* [RW 4] Determines alignment of read SRs when a request is split into
3368 * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
3369 * aligned. 4 - 512B aligned. */
3370#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c
3371/* [RW 1] when set the new alignment method (E2) will be applied; when reset
3372 * the original alignment method (E1 E1H) will be applied */
3373#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930
3374/* [RW 1] If 1; an ILT failure will not result in an ELT access; an
3375 interrupt will be asserted */
3376#define PXP2_REG_RQ_ELT_DISABLE 0x12066c
3377/* [RW 2] Endian mode for hc */
3378#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8
3379/* [RW 1] when '0' ILT logic will work as in A0; otherwise B0; for backward
3380 compatibility needs; note that different registers are used per mode */
3381#define PXP2_REG_RQ_ILT_MODE 0x1205b4
3382/* [WB 53] Onchip address table */
3383#define PXP2_REG_RQ_ONCHIP_AT 0x122000
3384/* [WB 53] Onchip address table - B0 */
3385#define PXP2_REG_RQ_ONCHIP_AT_B0 0x128000
3386/* [RW 13] Pending read limiter threshold; in Dwords */
3387#define PXP2_REG_RQ_PDR_LIMIT 0x12033c
3388/* [RW 2] Endian mode for qm */
3389#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194
3390#define PXP2_REG_RQ_QM_FIRST_ILT 0x120634
3391#define PXP2_REG_RQ_QM_LAST_ILT 0x120638
3392/* [RW 3] page size in L2P table for QM module; 0-4k; 1-8k; 2-16k; 3-32k;
3393 4-64k; 5-128k */
3394#define PXP2_REG_RQ_QM_P_SIZE 0x120050
3395/* [RW 1] '1' indicates that the RBC has finished configuring the PSWRQ */
3396#define PXP2_REG_RQ_RBC_DONE 0x1201b0
3397/* [RW 3] Max burst size field for read requests port 0; 000 - 128B;
3398 001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
3399#define PXP2_REG_RQ_RD_MBS0 0x120160
3400/* [RW 3] Max burst size field for read requests port 1; 000 - 128B;
3401 001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
3402#define PXP2_REG_RQ_RD_MBS1 0x120168
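/* Illustrative sketch (not part of the original register map): deriving the
 * 3-bit max-burst-size code for PXP2_REG_RQ_RD_MBS0/1 above (128B -> 000,
 * 256B -> 001, ... 4K -> 101). Assumes bytes is a power of two in range.
 */
static inline u32 rq_rd_mbs_code(u32 bytes)
{
	u32 code = 0;

	while ((128u << code) < bytes)
		code++;
	return code;
}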
3403/* [RW 2] Endian mode for src */
3404#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c
3405#define PXP2_REG_RQ_SRC_FIRST_ILT 0x12063c
3406#define PXP2_REG_RQ_SRC_LAST_ILT 0x120640
3407/* [RW 3] page size in L2P table for SRC module; 0-4k; 1-8k; 2-16k; 3-32k;
3408 4-64k; 5-128k */
3409#define PXP2_REG_RQ_SRC_P_SIZE 0x12006c
3410/* [RW 2] Endian mode for tm */
3411#define PXP2_REG_RQ_TM_ENDIAN_M 0x120198
3412#define PXP2_REG_RQ_TM_FIRST_ILT 0x120644
3413#define PXP2_REG_RQ_TM_LAST_ILT 0x120648
3414/* [RW 3] page size in L2P table for TM module; 0-4k; 1-8k; 2-16k; 3-32k;
3415 4-64k; 5-128k */
3416#define PXP2_REG_RQ_TM_P_SIZE 0x120034
3417/* [R 5] Number of entries in the ufifo; this fifo holds l2p completions */
3418#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY 0x12080c
3419/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */
3420#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR 0x120094
3421/* [R 8] Number of entries occupied by vq 0 in pswrq memory */
3422#define PXP2_REG_RQ_VQ0_ENTRY_CNT 0x120810
3423/* [R 8] Number of entries occupied by vq 10 in pswrq memory */
3424#define PXP2_REG_RQ_VQ10_ENTRY_CNT 0x120818
3425/* [R 8] Number of entries occupied by vq 11 in pswrq memory */
3426#define PXP2_REG_RQ_VQ11_ENTRY_CNT 0x120820
3427/* [R 8] Number of entries occupied by vq 12 in pswrq memory */
3428#define PXP2_REG_RQ_VQ12_ENTRY_CNT 0x120828
3429/* [R 8] Number of entries occupied by vq 13 in pswrq memory */
3430#define PXP2_REG_RQ_VQ13_ENTRY_CNT 0x120830
3431/* [R 8] Number of entries occupied by vq 14 in pswrq memory */
3432#define PXP2_REG_RQ_VQ14_ENTRY_CNT 0x120838
3433/* [R 8] Number of entries occupied by vq 15 in pswrq memory */
3434#define PXP2_REG_RQ_VQ15_ENTRY_CNT 0x120840
3435/* [R 8] Number of entries occupied by vq 16 in pswrq memory */
3436#define PXP2_REG_RQ_VQ16_ENTRY_CNT 0x120848
3437/* [R 8] Number of entries occupied by vq 17 in pswrq memory */
3438#define PXP2_REG_RQ_VQ17_ENTRY_CNT 0x120850
3439/* [R 8] Number of entries occupied by vq 18 in pswrq memory */
3440#define PXP2_REG_RQ_VQ18_ENTRY_CNT 0x120858
3441/* [R 8] Number of entries occupied by vq 19 in pswrq memory */
3442#define PXP2_REG_RQ_VQ19_ENTRY_CNT 0x120860
3443/* [R 8] Number of entries occupied by vq 1 in pswrq memory */
3444#define PXP2_REG_RQ_VQ1_ENTRY_CNT 0x120868
3445/* [R 8] Number of entries occupied by vq 20 in pswrq memory */
3446#define PXP2_REG_RQ_VQ20_ENTRY_CNT 0x120870
3447/* [R 8] Number of entries occupied by vq 21 in pswrq memory */
3448#define PXP2_REG_RQ_VQ21_ENTRY_CNT 0x120878
3449/* [R 8] Number of entries occupied by vq 22 in pswrq memory */
3450#define PXP2_REG_RQ_VQ22_ENTRY_CNT 0x120880
3451/* [R 8] Number of entries occupied by vq 23 in pswrq memory */
3452#define PXP2_REG_RQ_VQ23_ENTRY_CNT 0x120888
3453/* [R 8] Number of entries occupied by vq 24 in pswrq memory */
3454#define PXP2_REG_RQ_VQ24_ENTRY_CNT 0x120890
3455/* [R 8] Number of entries occupied by vq 25 in pswrq memory */
3456#define PXP2_REG_RQ_VQ25_ENTRY_CNT 0x120898
3457/* [R 8] Number of entries occupied by vq 26 in pswrq memory */
3458#define PXP2_REG_RQ_VQ26_ENTRY_CNT 0x1208a0
3459/* [R 8] Number of entries occupied by vq 27 in pswrq memory */
3460#define PXP2_REG_RQ_VQ27_ENTRY_CNT 0x1208a8
3461/* [R 8] Number of entries occupied by vq 28 in pswrq memory */
3462#define PXP2_REG_RQ_VQ28_ENTRY_CNT 0x1208b0
3463/* [R 8] Number of entries occupied by vq 29 in pswrq memory */
3464#define PXP2_REG_RQ_VQ29_ENTRY_CNT 0x1208b8
3465/* [R 8] Number of entries occupied by vq 2 in pswrq memory */
3466#define PXP2_REG_RQ_VQ2_ENTRY_CNT 0x1208c0
3467/* [R 8] Number of entries occupied by vq 30 in pswrq memory */
3468#define PXP2_REG_RQ_VQ30_ENTRY_CNT 0x1208c8
3469/* [R 8] Number of entries occupied by vq 31 in pswrq memory */
3470#define PXP2_REG_RQ_VQ31_ENTRY_CNT 0x1208d0
3471/* [R 8] Number of entries occupied by vq 3 in pswrq memory */
3472#define PXP2_REG_RQ_VQ3_ENTRY_CNT 0x1208d8
3473/* [R 8] Number of entries occupied by vq 4 in pswrq memory */
3474#define PXP2_REG_RQ_VQ4_ENTRY_CNT 0x1208e0
3475/* [R 8] Number of entries occupied by vq 5 in pswrq memory */
3476#define PXP2_REG_RQ_VQ5_ENTRY_CNT 0x1208e8
3477/* [R 8] Number of entries occupied by vq 6 in pswrq memory */
3478#define PXP2_REG_RQ_VQ6_ENTRY_CNT 0x1208f0
3479/* [R 8] Number of entries occupied by vq 7 in pswrq memory */
3480#define PXP2_REG_RQ_VQ7_ENTRY_CNT 0x1208f8
3481/* [R 8] Number of entries occupied by vq 8 in pswrq memory */
3482#define PXP2_REG_RQ_VQ8_ENTRY_CNT 0x120900
3483/* [R 8] Number of entries occupied by vq 9 in pswrq memory */
3484#define PXP2_REG_RQ_VQ9_ENTRY_CNT 0x120908
3485/* [RW 3] Max burst size field for write requests port 0; 000 - 128B;
3486 001 - 256B; 010 - 512B */
3487#define PXP2_REG_RQ_WR_MBS0 0x12015c
3488/* [RW 3] Max burst size field for write requests port 1; 000 - 128B;
3489 001 - 256B; 010 - 512B */
3490#define PXP2_REG_RQ_WR_MBS1 0x120164
3491/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
3492 buffer reaches this number has_payload will be asserted */
3493#define PXP2_REG_WR_CDU_MPS 0x1205f0
3494/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
3495 buffer reaches this number has_payload will be asserted */
3496#define PXP2_REG_WR_CSDM_MPS 0x1205d0
3497/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
3498 buffer reaches this number has_payload will be asserted */
3499#define PXP2_REG_WR_DBG_MPS 0x1205e8
3500/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
3501 buffer reaches this number has_payload will be asserted */
3502#define PXP2_REG_WR_DMAE_MPS 0x1205ec
3503/* [RW 10] if the number of entries in the dmae fifo is higher than this
3504 threshold then the has_payload indication will be asserted; the default
3505 value should be greater than the write MBS size */
3506#define PXP2_REG_WR_DMAE_TH 0x120368
3507/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
3508 buffer reaches this number has_payload will be asserted */
3509#define PXP2_REG_WR_HC_MPS 0x1205c8
3510/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
3511 buffer reaches this number has_payload will be asserted */
3512#define PXP2_REG_WR_QM_MPS 0x1205dc
3513/* [RW 1] 0 - working in A0 mode; 1 - working in B0 mode */
3514#define PXP2_REG_WR_REV_MODE 0x120670
3515/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
3516 buffer reaches this number has_payload will be asserted */
3517#define PXP2_REG_WR_SRC_MPS 0x1205e4
3518/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
3519 buffer reaches this number has_payload will be asserted */
3520#define PXP2_REG_WR_TM_MPS 0x1205e0
3521/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
3522 buffer reaches this number has_payload will be asserted */
3523#define PXP2_REG_WR_TSDM_MPS 0x1205d4
3524/* [RW 10] if the number of entries in the usdmdp fifo is higher than this
3525 threshold then the has_payload indication will be asserted; the default
3526 value should be greater than the write MBS size */
3527#define PXP2_REG_WR_USDMDP_TH 0x120348
3528/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
3529 buffer reaches this number has_payload will be asserted */
3530#define PXP2_REG_WR_USDM_MPS 0x1205cc
3531/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
3532 buffer reaches this number has_payload will be asserted */
3533#define PXP2_REG_WR_XSDM_MPS 0x1205d8
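/* Illustrative sketch (not part of the original register map): the 2-bit
 * payload-size code shared by the PXP2_REG_WR_*_MPS registers above
 * (128B -> 0, 256B -> 1, 512B -> 2, 1024B -> 3). Assumes bytes is a power
 * of two in range.
 */
static inline u32 wr_mps_code(u32 bytes)
{
	u32 code = 0;

	while ((128u << code) < bytes)
		code++;
	return code;
}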
3534/* [R 1] debug only: Indication if PSWHST arbiter is idle */
3535#define PXP_REG_HST_ARB_IS_IDLE 0x103004
3536/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
3537 this client is waiting for the arbiter. */
3538#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
3539/* [RW 1] When 1; doorbells are discarded and not passed to the doorbell
3540 queue block. Should be used to close the gates. */
3541#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
3542/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
3543 should be updated according to the 'hst_discard_doorbells' register when
3544 the state machine is idle */
3545#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
3546/* [RW 1] When 1; new internal writes arriving to the block are discarded.
3547 Should be used to close the gates. */
3548#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
3549/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
3550 means this PSWHST is discarding inputs from this client. Each bit should
3551 be updated according to the 'hst_discard_internal_writes' register when
3552 the state machine is idle. */
3553#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
3554/* [WB 160] Used for initialization of the inbound interrupts memory */
3555#define PXP_REG_HST_INBOUND_INT 0x103800
3556/* [RW 32] Interrupt mask register #0 read/write */
3557#define PXP_REG_PXP_INT_MASK_0 0x103074
3558#define PXP_REG_PXP_INT_MASK_1 0x103084
3559/* [R 32] Interrupt register #0 read */
3560#define PXP_REG_PXP_INT_STS_0 0x103068
3561#define PXP_REG_PXP_INT_STS_1 0x103078
3562/* [RC 32] Interrupt register #0 read clear */
3563#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c
3564#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c
3565/* [RW 27] Parity mask register #0 read/write */
3566#define PXP_REG_PXP_PRTY_MASK 0x103094
3567/* [R 26] Parity register #0 read */
3568#define PXP_REG_PXP_PRTY_STS 0x103088
3569/* [RC 27] Parity register #0 read clear */
3570#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c
3571/* [RW 4] The activity counter initial increment value sent in the load
3572 request */
3573#define QM_REG_ACTCTRINITVAL_0 0x168040
3574#define QM_REG_ACTCTRINITVAL_1 0x168044
3575#define QM_REG_ACTCTRINITVAL_2 0x168048
3576#define QM_REG_ACTCTRINITVAL_3 0x16804c
3577/* [RW 32] The base logical address (in bytes) of each physical queue. The
3578 index I represents the physical queue number. The 12 lsbs are ignored and
3579 considered zero so practically there are only 20 bits in this register;
3580 queues 63-0 */
3581#define QM_REG_BASEADDR 0x168900
3582/* [RW 32] The base logical address (in bytes) of each physical queue. The
3583 index I represents the physical queue number. The 12 lsbs are ignored and
3584 considered zero so practically there are only 20 bits in this register;
3585 queues 127-64 */
3586#define QM_REG_BASEADDR_EXT_A 0x16e100
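/* Illustrative sketch (not part of the original register map): since the 12
 * lsbs of QM_REG_BASEADDR entries are ignored, queue base addresses are
 * effectively 4K-aligned; masking makes the alignment explicit.
 */
static inline u32 qm_queue_base_addr(u32 byte_addr)
{
	return byte_addr & ~0xfffu;	/* low 12 bits read back as zero */
}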
3587/* [RW 16] The byte credit cost for each task. This value is for both ports */
3588#define QM_REG_BYTECRDCOST 0x168234
3589/* [RW 16] The initial byte credit value for both ports. */
3590#define QM_REG_BYTECRDINITVAL 0x168238
3591/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
3592 queue uses port 0 else it uses port 1; queues 31-0 */
3593#define QM_REG_BYTECRDPORT_LSB 0x168228
3594/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
3595 queue uses port 0 else it uses port 1; queues 95-64 */
3596#define QM_REG_BYTECRDPORT_LSB_EXT_A 0x16e520
3597/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
3598 queue uses port 0 else it uses port 1; queues 63-32 */
3599#define QM_REG_BYTECRDPORT_MSB 0x168224
3600/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
3601 queue uses port 0 else it uses port 1; queues 127-96 */
3602#define QM_REG_BYTECRDPORT_MSB_EXT_A 0x16e51c
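/* Illustrative sketch (not part of the original register map): setting the
 * port-select bit for one physical queue in the 32-queue bitmask registers
 * above (bit clear = port 0, bit set = port 1). Choosing the LSB/MSB/EXT
 * register for a given queue range is left to the caller.
 */
static inline u32 qm_bytecrd_port1(u32 mask, u32 queue)
{
	return mask | (1u << (queue & 31));
}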
3603/* [RW 16] The byte credit value above which the QM is considered almost
3604 full */
3605#define QM_REG_BYTECREDITAFULLTHR 0x168094
3606/* [RW 4] The initial credit for interface */
3607#define QM_REG_CMINITCRD_0 0x1680cc
3608#define QM_REG_BYTECRDCMDQ_0 0x16e6e8
3609#define QM_REG_CMINITCRD_1 0x1680d0
3610#define QM_REG_CMINITCRD_2 0x1680d4
3611#define QM_REG_CMINITCRD_3 0x1680d8
3612#define QM_REG_CMINITCRD_4 0x1680dc
3613#define QM_REG_CMINITCRD_5 0x1680e0
3614#define QM_REG_CMINITCRD_6 0x1680e4
3615#define QM_REG_CMINITCRD_7 0x1680e8
3616/* [RW 8] A mask bit per CM interface. If this bit is 0 then this interface
3617 is masked */
3618#define QM_REG_CMINTEN 0x1680ec
3619/* [RW 12] A bit vector which indicates which of the queues are tied to
3620 interface 0 */
3621#define QM_REG_CMINTVOQMASK_0 0x1681f4
3622#define QM_REG_CMINTVOQMASK_1 0x1681f8
3623#define QM_REG_CMINTVOQMASK_2 0x1681fc
3624#define QM_REG_CMINTVOQMASK_3 0x168200
3625#define QM_REG_CMINTVOQMASK_4 0x168204
3626#define QM_REG_CMINTVOQMASK_5 0x168208
3627#define QM_REG_CMINTVOQMASK_6 0x16820c
3628#define QM_REG_CMINTVOQMASK_7 0x168210
3629/* [RW 20] The number of connections divided by 16 which dictates the size
3630 of each queue that belongs to an even function number. */
3631#define QM_REG_CONNNUM_0 0x168020
3632/* [R 6] Keeps the fill level of the fifo from write client 4 */
3633#define QM_REG_CQM_WRC_FIFOLVL 0x168018
3634/* [RW 8] The context regions sent in the CFC load request */
3635#define QM_REG_CTXREG_0 0x168030
3636#define QM_REG_CTXREG_1 0x168034
3637#define QM_REG_CTXREG_2 0x168038
3638#define QM_REG_CTXREG_3 0x16803c
3639/* [RW 12] The VOQ mask used to select the VOQs which need to be full for
3640 bypass enable */
3641#define QM_REG_ENBYPVOQMASK 0x16823c
3642/* [RW 32] A bit mask per each physical queue. If a bit is set then the
3643 physical queue uses the byte credit; queues 31-0 */
3644#define QM_REG_ENBYTECRD_LSB 0x168220
3645/* [RW 32] A bit mask per each physical queue. If a bit is set then the
3646 physical queue uses the byte credit; queues 95-64 */
3647#define QM_REG_ENBYTECRD_LSB_EXT_A 0x16e518
3648/* [RW 32] A bit mask per each physical queue. If a bit is set then the
3649 physical queue uses the byte credit; queues 63-32 */
3650#define QM_REG_ENBYTECRD_MSB 0x16821c
3651/* [RW 32] A bit mask per each physical queue. If a bit is set then the
3652 physical queue uses the byte credit; queues 127-96 */
3653#define QM_REG_ENBYTECRD_MSB_EXT_A 0x16e514
3654/* [RW 4] If cleared then the secondary interface will not be served by the
3655 RR arbiter */
3656#define QM_REG_ENSEC 0x1680f0
3657/* [RW 32] NA */
3658#define QM_REG_FUNCNUMSEL_LSB 0x168230
3659/* [RW 32] NA */
3660#define QM_REG_FUNCNUMSEL_MSB 0x16822c
3661/* [RW 32] A mask register to mask the Almost empty signals which will not
3662 be used for the almost empty indication to the HW block; queues 31:0 */
3663#define QM_REG_HWAEMPTYMASK_LSB 0x168218
3664/* [RW 32] A mask register to mask the Almost empty signals which will not
3665 be used for the almost empty indication to the HW block; queues 95-64 */
3666#define QM_REG_HWAEMPTYMASK_LSB_EXT_A 0x16e510
3667/* [RW 32] A mask register to mask the Almost empty signals which will not
3668 be used for the almost empty indication to the HW block; queues 63:32 */
3669#define QM_REG_HWAEMPTYMASK_MSB 0x168214
3670/* [RW 32] A mask register to mask the Almost empty signals which will not
3671 be used for the almost empty indication to the HW block; queues 127-96 */
3672#define QM_REG_HWAEMPTYMASK_MSB_EXT_A 0x16e50c
3673/* [RW 4] The number of outstanding requests to CFC */
3674#define QM_REG_OUTLDREQ 0x168804
3675/* [RC 1] A flag to indicate that an overflow error occurred in one of the
3676 queues. */
3677#define QM_REG_OVFERROR 0x16805c
3678/* [RC 7] the Q where the overflow occurs */
3679#define QM_REG_OVFQNUM 0x168058
3680/* [R 16] Pause state for physical queues 15-0 */
3681#define QM_REG_PAUSESTATE0 0x168410
3682/* [R 16] Pause state for physical queues 31-16 */
3683#define QM_REG_PAUSESTATE1 0x168414
3684/* [R 16] Pause state for physical queues 47-32 */
3685#define QM_REG_PAUSESTATE2 0x16e684
3686/* [R 16] Pause state for physical queues 63-48 */
3687#define QM_REG_PAUSESTATE3 0x16e688
3688/* [R 16] Pause state for physical queues 79-64 */
3689#define QM_REG_PAUSESTATE4 0x16e68c
3690/* [R 16] Pause state for physical queues 95-80 */
3691#define QM_REG_PAUSESTATE5 0x16e690
3692/* [R 16] Pause state for physical queues 111-96 */
3693#define QM_REG_PAUSESTATE6 0x16e694
3694/* [R 16] Pause state for physical queues 127-112 */
3695#define QM_REG_PAUSESTATE7 0x16e698
3696/* [RW 2] The PCI attributes field used in the PCI request. */
3697#define QM_REG_PCIREQAT 0x168054
3698#define QM_REG_PF_EN 0x16e70c
3699/* [R 24] The number of tasks stored in the QM for the PF. only even
3700 * functions are valid in E2 (registers with odd index I are hard wired to 0) */
3701#define QM_REG_PF_USG_CNT_0 0x16e040
3702/* [R 16] NOT USED */
3703#define QM_REG_PORT0BYTECRD 0x168300
3704/* [R 16] The byte credit of port 1 */
3705#define QM_REG_PORT1BYTECRD 0x168304
3706/* [RW 3] pci function number of queues 15-0 */
3707#define QM_REG_PQ2PCIFUNC_0 0x16e6bc
3708#define QM_REG_PQ2PCIFUNC_1 0x16e6c0
3709#define QM_REG_PQ2PCIFUNC_2 0x16e6c4
3710#define QM_REG_PQ2PCIFUNC_3 0x16e6c8
3711#define QM_REG_PQ2PCIFUNC_4 0x16e6cc
3712#define QM_REG_PQ2PCIFUNC_5 0x16e6d0
3713#define QM_REG_PQ2PCIFUNC_6 0x16e6d4
3714#define QM_REG_PQ2PCIFUNC_7 0x16e6d8
3715/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follows:
3716 ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
3717 bank 0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
3718#define QM_REG_PTRTBL 0x168a00
3719/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as follows:
3720 ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
3721 bank 0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
3722#define QM_REG_PTRTBL_EXT_A 0x16e200
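/* Illustrative sketch (not part of the original register map): decoding a
 * 54-bit pointer-table entry per the mapping above - read pointer in
 * ptrtbl[53:30], write pointer in ptrtbl[29:6].
 */
static inline void qm_ptrtbl_decode(u64 ptrtbl, u32 *rd_ptr, u32 *wr_ptr)
{
	*rd_ptr = (u32)((ptrtbl >> 30) & 0xffffff);
	*wr_ptr = (u32)((ptrtbl >> 6) & 0xffffff);
}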
3723/* [RW 2] Interrupt mask register #0 read/write */
3724#define QM_REG_QM_INT_MASK 0x168444
3725/* [R 2] Interrupt register #0 read */
3726#define QM_REG_QM_INT_STS 0x168438
3727/* [RW 12] Parity mask register #0 read/write */
3728#define QM_REG_QM_PRTY_MASK 0x168454
3729/* [R 12] Parity register #0 read */
3730#define QM_REG_QM_PRTY_STS 0x168448
3731/* [RC 12] Parity register #0 read clear */
3732#define QM_REG_QM_PRTY_STS_CLR 0x16844c
3733/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
3734#define QM_REG_QSTATUS_HIGH 0x16802c
3735/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
3736#define QM_REG_QSTATUS_HIGH_EXT_A 0x16e408
3737/* [R 32] Current queues in pipeline: Queues from 0 to 31 */
3738#define QM_REG_QSTATUS_LOW 0x168028
3739/* [R 32] Current queues in pipeline: Queues from 64 to 95 */
3740#define QM_REG_QSTATUS_LOW_EXT_A 0x16e404
3741/* [R 24] The number of tasks queued for each queue; queues 63-0 */
3742#define QM_REG_QTASKCTR_0 0x168308
3743/* [R 24] The number of tasks queued for each queue; queues 127-64 */
3744#define QM_REG_QTASKCTR_EXT_A_0 0x16e584
3745/* [RW 4] Queue tied to VOQ */
3746#define QM_REG_QVOQIDX_0 0x1680f4
3747#define QM_REG_QVOQIDX_10 0x16811c
3748#define QM_REG_QVOQIDX_100 0x16e49c
3749#define QM_REG_QVOQIDX_101 0x16e4a0
3750#define QM_REG_QVOQIDX_102 0x16e4a4
3751#define QM_REG_QVOQIDX_103 0x16e4a8
3752#define QM_REG_QVOQIDX_104 0x16e4ac
3753#define QM_REG_QVOQIDX_105 0x16e4b0
3754#define QM_REG_QVOQIDX_106 0x16e4b4
3755#define QM_REG_QVOQIDX_107 0x16e4b8
3756#define QM_REG_QVOQIDX_108 0x16e4bc
3757#define QM_REG_QVOQIDX_109 0x16e4c0
3758#define QM_REG_QVOQIDX_11 0x168120
3759#define QM_REG_QVOQIDX_110 0x16e4c4
3760#define QM_REG_QVOQIDX_111 0x16e4c8
3761#define QM_REG_QVOQIDX_112 0x16e4cc
3762#define QM_REG_QVOQIDX_113 0x16e4d0
3763#define QM_REG_QVOQIDX_114 0x16e4d4
3764#define QM_REG_QVOQIDX_115 0x16e4d8
3765#define QM_REG_QVOQIDX_116 0x16e4dc
3766#define QM_REG_QVOQIDX_117 0x16e4e0
3767#define QM_REG_QVOQIDX_118 0x16e4e4
3768#define QM_REG_QVOQIDX_119 0x16e4e8
3769#define QM_REG_QVOQIDX_12 0x168124
3770#define QM_REG_QVOQIDX_120 0x16e4ec
3771#define QM_REG_QVOQIDX_121 0x16e4f0
3772#define QM_REG_QVOQIDX_122 0x16e4f4
3773#define QM_REG_QVOQIDX_123 0x16e4f8
3774#define QM_REG_QVOQIDX_124 0x16e4fc
3775#define QM_REG_QVOQIDX_125 0x16e500
3776#define QM_REG_QVOQIDX_126 0x16e504
3777#define QM_REG_QVOQIDX_127 0x16e508
3778#define QM_REG_QVOQIDX_13 0x168128
3779#define QM_REG_QVOQIDX_14 0x16812c
3780#define QM_REG_QVOQIDX_15 0x168130
3781#define QM_REG_QVOQIDX_16 0x168134
3782#define QM_REG_QVOQIDX_17 0x168138
3783#define QM_REG_QVOQIDX_21 0x168148
3784#define QM_REG_QVOQIDX_22 0x16814c
3785#define QM_REG_QVOQIDX_23 0x168150
3786#define QM_REG_QVOQIDX_24 0x168154
3787#define QM_REG_QVOQIDX_25 0x168158
3788#define QM_REG_QVOQIDX_26 0x16815c
3789#define QM_REG_QVOQIDX_27 0x168160
3790#define QM_REG_QVOQIDX_28 0x168164
3791#define QM_REG_QVOQIDX_29 0x168168
3792#define QM_REG_QVOQIDX_30 0x16816c
3793#define QM_REG_QVOQIDX_31 0x168170
3794#define QM_REG_QVOQIDX_32 0x168174
3795#define QM_REG_QVOQIDX_33 0x168178
3796#define QM_REG_QVOQIDX_34 0x16817c
3797#define QM_REG_QVOQIDX_35 0x168180
3798#define QM_REG_QVOQIDX_36 0x168184
3799#define QM_REG_QVOQIDX_37 0x168188
3800#define QM_REG_QVOQIDX_38 0x16818c
3801#define QM_REG_QVOQIDX_39 0x168190
3802#define QM_REG_QVOQIDX_40 0x168194
3803#define QM_REG_QVOQIDX_41 0x168198
3804#define QM_REG_QVOQIDX_42 0x16819c
3805#define QM_REG_QVOQIDX_43 0x1681a0
3806#define QM_REG_QVOQIDX_44 0x1681a4
3807#define QM_REG_QVOQIDX_45 0x1681a8
3808#define QM_REG_QVOQIDX_46 0x1681ac
3809#define QM_REG_QVOQIDX_47 0x1681b0
3810#define QM_REG_QVOQIDX_48 0x1681b4
3811#define QM_REG_QVOQIDX_49 0x1681b8
3812#define QM_REG_QVOQIDX_5 0x168108
3813#define QM_REG_QVOQIDX_50 0x1681bc
3814#define QM_REG_QVOQIDX_51 0x1681c0
3815#define QM_REG_QVOQIDX_52 0x1681c4
3816#define QM_REG_QVOQIDX_53 0x1681c8
3817#define QM_REG_QVOQIDX_54 0x1681cc
3818#define QM_REG_QVOQIDX_55 0x1681d0
3819#define QM_REG_QVOQIDX_56 0x1681d4
3820#define QM_REG_QVOQIDX_57 0x1681d8
3821#define QM_REG_QVOQIDX_58 0x1681dc
3822#define QM_REG_QVOQIDX_59 0x1681e0
3823#define QM_REG_QVOQIDX_6 0x16810c
3824#define QM_REG_QVOQIDX_60 0x1681e4
3825#define QM_REG_QVOQIDX_61 0x1681e8
3826#define QM_REG_QVOQIDX_62 0x1681ec
3827#define QM_REG_QVOQIDX_63 0x1681f0
3828#define QM_REG_QVOQIDX_64 0x16e40c
3829#define QM_REG_QVOQIDX_65 0x16e410
3830#define QM_REG_QVOQIDX_69 0x16e420
3831#define QM_REG_QVOQIDX_7 0x168110
3832#define QM_REG_QVOQIDX_70 0x16e424
3833#define QM_REG_QVOQIDX_71 0x16e428
3834#define QM_REG_QVOQIDX_72 0x16e42c
3835#define QM_REG_QVOQIDX_73 0x16e430
3836#define QM_REG_QVOQIDX_74 0x16e434
3837#define QM_REG_QVOQIDX_75 0x16e438
3838#define QM_REG_QVOQIDX_76 0x16e43c
3839#define QM_REG_QVOQIDX_77 0x16e440
3840#define QM_REG_QVOQIDX_78 0x16e444
3841#define QM_REG_QVOQIDX_79 0x16e448
3842#define QM_REG_QVOQIDX_8 0x168114
3843#define QM_REG_QVOQIDX_80 0x16e44c
3844#define QM_REG_QVOQIDX_81 0x16e450
3845#define QM_REG_QVOQIDX_85 0x16e460
3846#define QM_REG_QVOQIDX_86 0x16e464
3847#define QM_REG_QVOQIDX_87 0x16e468
3848#define QM_REG_QVOQIDX_88 0x16e46c
3849#define QM_REG_QVOQIDX_89 0x16e470
3850#define QM_REG_QVOQIDX_9 0x168118
3851#define QM_REG_QVOQIDX_90 0x16e474
3852#define QM_REG_QVOQIDX_91 0x16e478
3853#define QM_REG_QVOQIDX_92 0x16e47c
3854#define QM_REG_QVOQIDX_93 0x16e480
3855#define QM_REG_QVOQIDX_94 0x16e484
3856#define QM_REG_QVOQIDX_95 0x16e488
3857#define QM_REG_QVOQIDX_96 0x16e48c
3858#define QM_REG_QVOQIDX_97 0x16e490
3859#define QM_REG_QVOQIDX_98 0x16e494
3860#define QM_REG_QVOQIDX_99 0x16e498
3861/* [RW 1] Initialization bit command */
3862#define QM_REG_SOFT_RESET 0x168428
3863/* [RW 8] The credit cost per task in the QM. A value per VOQ */
3864#define QM_REG_TASKCRDCOST_0 0x16809c
3865#define QM_REG_TASKCRDCOST_1 0x1680a0
3866#define QM_REG_TASKCRDCOST_2 0x1680a4
3867#define QM_REG_TASKCRDCOST_4 0x1680ac
3868#define QM_REG_TASKCRDCOST_5 0x1680b0
3869/* [R 6] Keeps the fill level of the fifo from write client 3 */
3870#define QM_REG_TQM_WRC_FIFOLVL 0x168010
3871/* [R 6] Keeps the fill level of the fifo from write client 2 */
3872#define QM_REG_UQM_WRC_FIFOLVL 0x168008
3873/* [RC 32] Credit update error register */
3874#define QM_REG_VOQCRDERRREG 0x168408
3875/* [R 16] The credit value for each VOQ */
3876#define QM_REG_VOQCREDIT_0 0x1682d0
3877#define QM_REG_VOQCREDIT_1 0x1682d4
3878#define QM_REG_VOQCREDIT_4 0x1682e0
3879/* [RW 16] The credit value above which the QM is considered almost full */
3880#define QM_REG_VOQCREDITAFULLTHR 0x168090
3881/* [RW 16] The init and maximum credit for each VoQ */
3882#define QM_REG_VOQINITCREDIT_0 0x168060
3883#define QM_REG_VOQINITCREDIT_1 0x168064
3884#define QM_REG_VOQINITCREDIT_2 0x168068
3885#define QM_REG_VOQINITCREDIT_4 0x168070
3886#define QM_REG_VOQINITCREDIT_5 0x168074
3887/* [RW 1] The port to which the VOQ belongs */
3888#define QM_REG_VOQPORT_0 0x1682a0
3889#define QM_REG_VOQPORT_1 0x1682a4
3890#define QM_REG_VOQPORT_2 0x1682a8
3891/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
3892#define QM_REG_VOQQMASK_0_LSB 0x168240
3893/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
3894#define QM_REG_VOQQMASK_0_LSB_EXT_A 0x16e524
3895/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
3896#define QM_REG_VOQQMASK_0_MSB 0x168244
3897/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
3898#define QM_REG_VOQQMASK_0_MSB_EXT_A 0x16e528
3899/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
3900#define QM_REG_VOQQMASK_10_LSB 0x168290
3901/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
3902#define QM_REG_VOQQMASK_10_LSB_EXT_A 0x16e574
3903/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
3904#define QM_REG_VOQQMASK_10_MSB 0x168294
3905/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
3906#define QM_REG_VOQQMASK_10_MSB_EXT_A 0x16e578
3907/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
3908#define QM_REG_VOQQMASK_11_LSB 0x168298
3909/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
3910#define QM_REG_VOQQMASK_11_LSB_EXT_A 0x16e57c
3911/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
3912#define QM_REG_VOQQMASK_11_MSB 0x16829c
3913/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
3914#define QM_REG_VOQQMASK_11_MSB_EXT_A 0x16e580
3915/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
3916#define QM_REG_VOQQMASK_1_LSB 0x168248
3917/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
3918#define QM_REG_VOQQMASK_1_LSB_EXT_A 0x16e52c
3919/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
3920#define QM_REG_VOQQMASK_1_MSB 0x16824c
3921/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
3922#define QM_REG_VOQQMASK_1_MSB_EXT_A 0x16e530
3923/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
3924#define QM_REG_VOQQMASK_2_LSB 0x168250
3925/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
3926#define QM_REG_VOQQMASK_2_LSB_EXT_A 0x16e534
3927/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
3928#define QM_REG_VOQQMASK_2_MSB 0x168254
3929/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
3930#define QM_REG_VOQQMASK_2_MSB_EXT_A 0x16e538
3931/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
3932#define QM_REG_VOQQMASK_3_LSB 0x168258
3933/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
3934#define QM_REG_VOQQMASK_3_LSB_EXT_A 0x16e53c
3935/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
3936#define QM_REG_VOQQMASK_3_MSB_EXT_A 0x16e540
3937/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
3938#define QM_REG_VOQQMASK_4_LSB 0x168260
3939/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
3940#define QM_REG_VOQQMASK_4_LSB_EXT_A 0x16e544
3941/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
3942#define QM_REG_VOQQMASK_4_MSB 0x168264
3943/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
3944#define QM_REG_VOQQMASK_4_MSB_EXT_A 0x16e548
3945/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
3946#define QM_REG_VOQQMASK_5_LSB 0x168268
3947/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
3948#define QM_REG_VOQQMASK_5_LSB_EXT_A 0x16e54c
3949/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
3950#define QM_REG_VOQQMASK_5_MSB 0x16826c
3951/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
3952#define QM_REG_VOQQMASK_5_MSB_EXT_A 0x16e550
3953/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
3954#define QM_REG_VOQQMASK_6_LSB 0x168270
3955/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
3956#define QM_REG_VOQQMASK_6_LSB_EXT_A 0x16e554
3957/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
3958#define QM_REG_VOQQMASK_6_MSB 0x168274
3959/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
3960#define QM_REG_VOQQMASK_6_MSB_EXT_A 0x16e558
3961/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
3962#define QM_REG_VOQQMASK_7_LSB 0x168278
3963/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
3964#define QM_REG_VOQQMASK_7_LSB_EXT_A 0x16e55c
3965/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
3966#define QM_REG_VOQQMASK_7_MSB 0x16827c
3967/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
3968#define QM_REG_VOQQMASK_7_MSB_EXT_A 0x16e560
3969/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
3970#define QM_REG_VOQQMASK_8_LSB 0x168280
3971/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
3972#define QM_REG_VOQQMASK_8_LSB_EXT_A 0x16e564
3973/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
3974#define QM_REG_VOQQMASK_8_MSB 0x168284
3975/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
3976#define QM_REG_VOQQMASK_8_MSB_EXT_A 0x16e568
3977/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
3978#define QM_REG_VOQQMASK_9_LSB 0x168288
3979/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
3980#define QM_REG_VOQQMASK_9_LSB_EXT_A 0x16e56c
3981/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
3982#define QM_REG_VOQQMASK_9_MSB_EXT_A 0x16e570
3983/* [RW 32] WRR weights */
3984#define QM_REG_WRRWEIGHTS_0 0x16880c
3985#define QM_REG_WRRWEIGHTS_1 0x168810
3986#define QM_REG_WRRWEIGHTS_10 0x168814
3987#define QM_REG_WRRWEIGHTS_11 0x168818
3988#define QM_REG_WRRWEIGHTS_12 0x16881c
3989#define QM_REG_WRRWEIGHTS_13 0x168820
3990#define QM_REG_WRRWEIGHTS_14 0x168824
3991#define QM_REG_WRRWEIGHTS_15 0x168828
3992#define QM_REG_WRRWEIGHTS_16 0x16e000
3993#define QM_REG_WRRWEIGHTS_17 0x16e004
3994#define QM_REG_WRRWEIGHTS_18 0x16e008
3995#define QM_REG_WRRWEIGHTS_19 0x16e00c
3996#define QM_REG_WRRWEIGHTS_2 0x16882c
3997#define QM_REG_WRRWEIGHTS_20 0x16e010
3998#define QM_REG_WRRWEIGHTS_21 0x16e014
3999#define QM_REG_WRRWEIGHTS_22 0x16e018
4000#define QM_REG_WRRWEIGHTS_23 0x16e01c
4001#define QM_REG_WRRWEIGHTS_24 0x16e020
4002#define QM_REG_WRRWEIGHTS_25 0x16e024
4003#define QM_REG_WRRWEIGHTS_26 0x16e028
4004#define QM_REG_WRRWEIGHTS_27 0x16e02c
4005#define QM_REG_WRRWEIGHTS_28 0x16e030
4006#define QM_REG_WRRWEIGHTS_29 0x16e034
4007#define QM_REG_WRRWEIGHTS_3 0x168830
4008#define QM_REG_WRRWEIGHTS_30 0x16e038
4009#define QM_REG_WRRWEIGHTS_31 0x16e03c
4010#define QM_REG_WRRWEIGHTS_4 0x168834
4011#define QM_REG_WRRWEIGHTS_5 0x168838
4012#define QM_REG_WRRWEIGHTS_6 0x16883c
4013#define QM_REG_WRRWEIGHTS_7 0x168840
4014#define QM_REG_WRRWEIGHTS_8 0x168844
4015#define QM_REG_WRRWEIGHTS_9 0x168848
4016/* [R 6] Keeps the fill level of the fifo from write client 1 */
4017#define QM_REG_XQM_WRC_FIFOLVL 0x168000
4018/* [W 1] reset to parity interrupt */
4019#define SEM_FAST_REG_PARITY_RST 0x18840
4020#define SRC_REG_COUNTFREE0 0x40500
4021/* [RW 1] If clear the searcher is compatible with E1 A0 - supports only
4022 two ports. If set the searcher supports 8 functions. */
4023#define SRC_REG_E1HMF_ENABLE 0x404cc
4024#define SRC_REG_FIRSTFREE0 0x40510
4025#define SRC_REG_KEYRSS0_0 0x40408
4026#define SRC_REG_KEYRSS0_7 0x40424
4027#define SRC_REG_KEYRSS1_9 0x40454
4028#define SRC_REG_KEYSEARCH_0 0x40458
4029#define SRC_REG_KEYSEARCH_1 0x4045c
4030#define SRC_REG_KEYSEARCH_2 0x40460
4031#define SRC_REG_KEYSEARCH_3 0x40464
4032#define SRC_REG_KEYSEARCH_4 0x40468
4033#define SRC_REG_KEYSEARCH_5 0x4046c
4034#define SRC_REG_KEYSEARCH_6 0x40470
4035#define SRC_REG_KEYSEARCH_7 0x40474
4036#define SRC_REG_KEYSEARCH_8 0x40478
4037#define SRC_REG_KEYSEARCH_9 0x4047c
4038#define SRC_REG_LASTFREE0 0x40530
4039#define SRC_REG_NUMBER_HASH_BITS0 0x40400
4040/* [RW 1] Reset internal state machines. */
4041#define SRC_REG_SOFT_RST 0x4049c
4042/* [R 3] Interrupt register #0 read */
4043#define SRC_REG_SRC_INT_STS 0x404ac
4044/* [RW 3] Parity mask register #0 read/write */
4045#define SRC_REG_SRC_PRTY_MASK 0x404c8
4046/* [R 3] Parity register #0 read */
4047#define SRC_REG_SRC_PRTY_STS 0x404bc
4048/* [RC 3] Parity register #0 read clear */
4049#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0
4050/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
4051#define TCM_REG_CAM_OCCUP 0x5017c
4052/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
4053 disregarded; valid output is deasserted; all other signals are treated as
4054 usual; if 1 - normal activity. */
4055#define TCM_REG_CDU_AG_RD_IFEN 0x50034
4056/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
4057 are disregarded; all other signals are treated as usual; if 1 - normal
4058 activity. */
4059#define TCM_REG_CDU_AG_WR_IFEN 0x50030
4060/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
4061 disregarded; valid output is deasserted; all other signals are treated as
4062 usual; if 1 - normal activity. */
4063#define TCM_REG_CDU_SM_RD_IFEN 0x5003c
4064/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
4065 inputs are disregarded; all other signals are treated as usual; if 1 -
4066 normal activity. */
4067#define TCM_REG_CDU_SM_WR_IFEN 0x50038
4068/* [RW 4] CFC output initial credit. Max credit available - 15. Write
4069 writes the initial credit value; read returns the current value of the
4070 credit counter. Must be initialized to 1 at start-up. */
4071#define TCM_REG_CFC_INIT_CRD 0x50204
4072/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
4073 weight 8 (the most prioritised); 1 stands for weight 1 (least
4074 prioritised); 2 stands for weight 2; etc. */
4075#define TCM_REG_CP_WEIGHT 0x500c0
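/* Illustrative sketch (not part of the original register map): the WRR
 * weight encoding used by the TCM_REG_*_WEIGHT registers above - 0 is the
 * special code for weight 8; any other value is the weight itself.
 */
static inline u32 tcm_wrr_weight(u32 code)
{
	return code ? code : 8;
}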
4076/* [RW 1] Input csem Interface enable. If 0 - the valid input is
4077 disregarded; acknowledge output is deasserted; all other signals are
4078 treated as usual; if 1 - normal activity. */
4079#define TCM_REG_CSEM_IFEN 0x5002c
4080/* [RC 1] Message length mismatch (relative to last indication) at the In#9
4081 interface. */
4082#define TCM_REG_CSEM_LENGTH_MIS 0x50174
4083/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
4084 weight 8 (the most prioritised); 1 stands for weight 1 (least
4085 prioritised); 2 stands for weight 2; etc. */
4086#define TCM_REG_CSEM_WEIGHT 0x500bc
4087/* [RW 8] The Event ID in case of ErrorFlg is set in the input message. */
4088#define TCM_REG_ERR_EVNT_ID 0x500a0
4089/* [RW 28] The CM erroneous header for QM and Timers formatting. */
4090#define TCM_REG_ERR_TCM_HDR 0x5009c
4091/* [RW 8] The Event ID for Timers expiration. */
4092#define TCM_REG_EXPR_EVNT_ID 0x500a4
4093/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
4094 writes the initial credit value; read returns the current value of the
4095 credit counter. Must be initialized to 64 at start-up. */
4096#define TCM_REG_FIC0_INIT_CRD 0x5020c
4097/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
4098 writes the initial credit value; read returns the current value of the
4099 credit counter. Must be initialized to 64 at start-up. */
4100#define TCM_REG_FIC1_INIT_CRD 0x50210
4101/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
4102 - strict priority defined by ~tcm_registers_gr_ag_pr.gr_ag_pr;
4103 ~tcm_registers_gr_ld0_pr.gr_ld0_pr and
4104 ~tcm_registers_gr_ld1_pr.gr_ld1_pr. */
4105#define TCM_REG_GR_ARB_TYPE 0x50114
4106/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
4107 highest priority is 3. It is supposed that the Store channel is the
4108 complement of the other 3 groups. */
4109#define TCM_REG_GR_LD0_PR 0x5011c
4110/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
4111 highest priority is 3. It is supposed that the Store channel is the
4112 complement of the other 3 groups. */
4113#define TCM_REG_GR_LD1_PR 0x50120
4114/* [RW 4] The number of double REG-pairs; loaded from the STORM context and
4115 sent to STORM; for a specific connection type. The double REG-pairs are
4116 used to align to STORM context row size of 128 bits. The offset of these
4117 data in the STORM context is always 0. Index _i stands for the connection
4118 type (one of 16). */
4119#define TCM_REG_N_SM_CTX_LD_0 0x50050
4120#define TCM_REG_N_SM_CTX_LD_1 0x50054
4121#define TCM_REG_N_SM_CTX_LD_2 0x50058
4122#define TCM_REG_N_SM_CTX_LD_3 0x5005c
4123#define TCM_REG_N_SM_CTX_LD_4 0x50060
4124#define TCM_REG_N_SM_CTX_LD_5 0x50064
4125/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
4126 acknowledge output is deasserted; all other signals are treated as usual;
4127 if 1 - normal activity. */
4128#define TCM_REG_PBF_IFEN 0x50024
4129/* [RC 1] Message length mismatch (relative to last indication) at the In#7
4130 interface. */
4131#define TCM_REG_PBF_LENGTH_MIS 0x5016c
4132/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
4133 weight 8 (the most prioritised); 1 stands for weight 1 (least
4134 prioritised); 2 stands for weight 2; etc. */
4135#define TCM_REG_PBF_WEIGHT 0x500b4
4136#define TCM_REG_PHYS_QNUM0_0 0x500e0
4137#define TCM_REG_PHYS_QNUM0_1 0x500e4
4138#define TCM_REG_PHYS_QNUM1_0 0x500e8
4139#define TCM_REG_PHYS_QNUM1_1 0x500ec
4140#define TCM_REG_PHYS_QNUM2_0 0x500f0
4141#define TCM_REG_PHYS_QNUM2_1 0x500f4
4142#define TCM_REG_PHYS_QNUM3_0 0x500f8
4143#define TCM_REG_PHYS_QNUM3_1 0x500fc
4144/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded;
4145 acknowledge output is deasserted; all other signals are treated as usual;
4146 if 1 - normal activity. */
4147#define TCM_REG_PRS_IFEN 0x50020
4148/* [RC 1] Message length mismatch (relative to last indication) at the In#6
4149 interface. */
4150#define TCM_REG_PRS_LENGTH_MIS 0x50168
4151/* [RW 3] The weight of the input prs in the WRR mechanism. 0 stands for
4152 weight 8 (the most prioritised); 1 stands for weight 1 (least
4153 prioritised); 2 stands for weight 2; etc. */
4154#define TCM_REG_PRS_WEIGHT 0x500b0
4155/* [RW 8] The Event ID for Timers formatting in case of stop done. */
4156#define TCM_REG_STOP_EVNT_ID 0x500a8
4157/* [RC 1] Message length mismatch (relative to last indication) at the STORM
4158 interface. */
4159#define TCM_REG_STORM_LENGTH_MIS 0x50160
4160/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
4161 disregarded; acknowledge output is deasserted; all other signals are
4162 treated as usual; if 1 - normal activity. */
4163#define TCM_REG_STORM_TCM_IFEN 0x50010
4164/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
4165 weight 8 (the most prioritised); 1 stands for weight 1 (least
4166 prioritised); 2 stands for weight 2; etc. */
4167#define TCM_REG_STORM_WEIGHT 0x500ac
4168/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
4169 acknowledge output is deasserted; all other signals are treated as usual;
4170 if 1 - normal activity. */
4171#define TCM_REG_TCM_CFC_IFEN 0x50040
4172/* [RW 11] Interrupt mask register #0 read/write */
4173#define TCM_REG_TCM_INT_MASK 0x501dc
4174/* [R 11] Interrupt register #0 read */
4175#define TCM_REG_TCM_INT_STS 0x501d0
4176/* [RW 27] Parity mask register #0 read/write */
4177#define TCM_REG_TCM_PRTY_MASK 0x501ec
4178/* [R 27] Parity register #0 read */
4179#define TCM_REG_TCM_PRTY_STS 0x501e0
4180/* [RC 27] Parity register #0 read clear */
4181#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4
4182/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
4183 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
4184 It is used to determine the number of the AG context REG-pairs written
4185 back when the input message Reg1WbFlg isn't set. */
4186#define TCM_REG_TCM_REG0_SZ 0x500d8
4187/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
4188 disregarded; valid is deasserted; all other signals are treated as usual;
4189 if 1 - normal activity. */
4190#define TCM_REG_TCM_STORM0_IFEN 0x50004
4191/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
4192 disregarded; valid is deasserted; all other signals are treated as usual;
4193 if 1 - normal activity. */
4194#define TCM_REG_TCM_STORM1_IFEN 0x50008
4195/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
4196 disregarded; valid is deasserted; all other signals are treated as usual;
4197 if 1 - normal activity. */
4198#define TCM_REG_TCM_TQM_IFEN 0x5000c
4199/* [RW 1] If set; the Q index received from the QM is inserted to event ID. */
4200#define TCM_REG_TCM_TQM_USE_Q 0x500d4
4201/* [RW 28] The CM header for Timers expiration command. */
4202#define TCM_REG_TM_TCM_HDR 0x50098
4203/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
4204 disregarded; acknowledge output is deasserted; all other signals are
4205 treated as usual; if 1 - normal activity. */
4206#define TCM_REG_TM_TCM_IFEN 0x5001c
4207/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
4208 weight 8 (the most prioritised); 1 stands for weight 1 (least
4209 prioritised); 2 stands for weight 2; etc. */
4210#define TCM_REG_TM_WEIGHT 0x500d0
4211/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
4212 the initial credit value; read returns the current value of the credit
4213 counter. Must be initialized to 32 at start-up. */
4214#define TCM_REG_TQM_INIT_CRD 0x5021c
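/* Editorial sketch: per the comment above, a write to TQM_INIT_CRD loads
 * the credit counter and a read returns its live value, so start-up code
 * must seed it with 32. In the driver this would normally happen through
 * the generated init values; the explicit write below is only
 * illustrative ('bp' and REG_WR() assumed from the driver headers).
 */
static inline void tcm_seed_tqm_credit(struct bnx2x *bp)
{
	REG_WR(bp, TCM_REG_TQM_INIT_CRD, 32);	/* mandated start-up value */
}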
4215/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
4216 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
4217 prioritised); 2 stands for weight 2; etc. */
4218#define TCM_REG_TQM_P_WEIGHT 0x500c8
4219/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
4220 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
4221 prioritised); 2 stands for weight 2; etc. */
4222#define TCM_REG_TQM_S_WEIGHT 0x500cc
4223/* [RW 28] The CM header value for QM request (primary). */
4224#define TCM_REG_TQM_TCM_HDR_P 0x50090
4225/* [RW 28] The CM header value for QM request (secondary). */
4226#define TCM_REG_TQM_TCM_HDR_S 0x50094
4227/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
4228 acknowledge output is deasserted; all other signals are treated as usual;
4229 if 1 - normal activity. */
4230#define TCM_REG_TQM_TCM_IFEN 0x50014
4231/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
4232 acknowledge output is deasserted; all other signals are treated as usual;
4233 if 1 - normal activity. */
4234#define TCM_REG_TSDM_IFEN 0x50018
4235/* [RC 1] Message length mismatch (relative to last indication) at the SDM
4236 interface. */
4237#define TCM_REG_TSDM_LENGTH_MIS 0x50164
4238/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
4239 weight 8 (the most prioritised); 1 stands for weight 1 (least
4240 prioritised); 2 stands for weight 2; etc. */
4241#define TCM_REG_TSDM_WEIGHT 0x500c4
4242/* [RW 1] Input usem Interface enable. If 0 - the valid input is
4243 disregarded; acknowledge output is deasserted; all other signals are
4244 treated as usual; if 1 - normal activity. */
4245#define TCM_REG_USEM_IFEN 0x50028
4246/* [RC 1] Message length mismatch (relative to last indication) at the In#8
4247 interface. */
4248#define TCM_REG_USEM_LENGTH_MIS 0x50170
4249/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
4250 weight 8 (the most prioritised); 1 stands for weight 1 (least
4251 prioritised); 2 stands for weight 2; etc. */
4252#define TCM_REG_USEM_WEIGHT 0x500b8
4253/* [RW 21] Indirect access to the descriptor table of the XX protection
4254 mechanism. The fields are: [5:0] - length of the message; [15:6] - message
4255 pointer; [20:16] - next pointer. */
4256#define TCM_REG_XX_DESCR_TABLE 0x50280
4257#define TCM_REG_XX_DESCR_TABLE_SIZE 29
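/* Editorial sketch: field extraction for XX descriptor entries read back
 * through TCM_REG_XX_DESCR_TABLE, following the 21-bit layout documented
 * above ([5:0] length; [15:6] message pointer; [20:16] next pointer).
 * The macro names are hypothetical, not taken from the driver.
 */
#define TCM_XX_DESCR_LENGTH(e)		((e) & 0x3f)		/* [5:0]   */
#define TCM_XX_DESCR_MSG_PTR(e)		(((e) >> 6) & 0x3ff)	/* [15:6]  */
#define TCM_XX_DESCR_NEXT_PTR(e)	(((e) >> 16) & 0x1f)	/* [20:16] */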
4258/* [R 6] Use to read the value of XX protection Free counter. */
4259#define TCM_REG_XX_FREE 0x50178
4260/* [RW 6] Initial value for the credit counter; responsible for filling
4261 the Input Stage XX protection buffer with the XX protection pending
4262 messages. Max credit available - 127. Write writes the initial credit
4263 value; read returns the current value of the credit counter. Must be
4264 initialized to 19 at start-up. */
4265#define TCM_REG_XX_INIT_CRD 0x50220
4266/* [RW 6] Maximum link list size (messages locked) per connection in the XX
4267 protection. */
4268#define TCM_REG_XX_MAX_LL_SZ 0x50044
4269/* [RW 6] The maximum number of pending messages; which may be stored in XX
4270 protection. ~tcm_registers_xx_free.xx_free is read on read. */
4271#define TCM_REG_XX_MSG_NUM 0x50224
4272/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
4273#define TCM_REG_XX_OVFL_EVNT_ID 0x50048
4274/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
4275 The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
4276 header pointer. */
4277#define TCM_REG_XX_TABLE 0x50240
4278/* [RW 4] Load value for cfc ac credit cnt. */
4279#define TM_REG_CFC_AC_CRDCNT_VAL 0x164208
4280/* [RW 4] Load value for cfc cld credit cnt. */
4281#define TM_REG_CFC_CLD_CRDCNT_VAL 0x164210
4282/* [RW 8] Client0 context region. */
4283#define TM_REG_CL0_CONT_REGION 0x164030
4284/* [RW 8] Client1 context region. */
4285#define TM_REG_CL1_CONT_REGION 0x164034
4286/* [RW 8] Client2 context region. */
4287#define TM_REG_CL2_CONT_REGION 0x164038
4288/* [RW 2] Client in High priority client number. */
4289#define TM_REG_CLIN_PRIOR0_CLIENT 0x164024
4290/* [RW 4] Load value for clout0 cred cnt. */
4291#define TM_REG_CLOUT_CRDCNT0_VAL 0x164220
4292/* [RW 4] Load value for clout1 cred cnt. */
4293#define TM_REG_CLOUT_CRDCNT1_VAL 0x164228
4294/* [RW 4] Load value for clout2 cred cnt. */
4295#define TM_REG_CLOUT_CRDCNT2_VAL 0x164230
4296/* [RW 1] Enable client0 input. */
4297#define TM_REG_EN_CL0_INPUT 0x164008
4298/* [RW 1] Enable client1 input. */
4299#define TM_REG_EN_CL1_INPUT 0x16400c
4300/* [RW 1] Enable client2 input. */
4301#define TM_REG_EN_CL2_INPUT 0x164010
4302#define TM_REG_EN_LINEAR0_TIMER 0x164014
4303/* [RW 1] Enable real time counter. */
4304#define TM_REG_EN_REAL_TIME_CNT 0x1640d8
4305/* [RW 1] Enable for Timers state machines. */
4306#define TM_REG_EN_TIMERS 0x164000
4307/* [RW 4] Load value for expiration credit cnt. CFC max number of
4308 outstanding load requests for timers (expiration) context loading. */
4309#define TM_REG_EXP_CRDCNT_VAL 0x164238
4310/* [RW 32] Linear0 logic address. */
4311#define TM_REG_LIN0_LOGIC_ADDR 0x164240
4312/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
4313#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048
4314/* [ST 16] Linear0 Number of scans counter. */
4315#define TM_REG_LIN0_NUM_SCANS 0x1640a0
4316/* [WB 64] Linear0 phy address. */
4317#define TM_REG_LIN0_PHY_ADDR 0x164270
4318/* [RW 1] Linear0 physical address valid. */
4319#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248
4320#define TM_REG_LIN0_SCAN_ON 0x1640d0
4321/* [RW 24] Linear0 array scan timeout. */
4322#define TM_REG_LIN0_SCAN_TIME 0x16403c
4323#define TM_REG_LIN0_VNIC_UC 0x164128
4324/* [RW 32] Linear1 logic address. */
4325#define TM_REG_LIN1_LOGIC_ADDR 0x164250
4326/* [WB 64] Linear1 phy address. */
4327#define TM_REG_LIN1_PHY_ADDR 0x164280
4328/* [RW 1] Linear1 physical address valid. */
4329#define TM_REG_LIN1_PHY_ADDR_VALID 0x164258
4330/* [RW 6] Linear timer set_clear fifo threshold. */
4331#define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR 0x164070
4332/* [RW 2] Load value for pci arbiter credit cnt. */
4333#define TM_REG_PCIARB_CRDCNT_VAL 0x164260
4334/* [RW 20] The amount of hardware cycles for each timer tick. */
4335#define TM_REG_TIMER_TICK_SIZE 0x16401c
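/* Editorial sketch: TIMER_TICK_SIZE holds hardware cycles per timer
 * tick, so a desired tick period converts with one multiply. The core
 * clock argument is an assumption for illustration, not a documented
 * constant.
 */
static inline u32 tm_timer_tick_size(u32 tick_usec, u32 core_clk_mhz)
{
	return tick_usec * core_clk_mhz;	/* cycles = us * cycles/us */
}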
4336/* [RW 8] Timers Context region. */
4337#define TM_REG_TM_CONTEXT_REGION 0x164044
4338/* [RW 1] Interrupt mask register #0 read/write */
4339#define TM_REG_TM_INT_MASK 0x1640fc
4340/* [R 1] Interrupt register #0 read */
4341#define TM_REG_TM_INT_STS 0x1640f0
4342/* [RW 7] Parity mask register #0 read/write */
4343#define TM_REG_TM_PRTY_MASK 0x16410c
4344/* [RC 7] Parity register #0 read clear */
4345#define TM_REG_TM_PRTY_STS_CLR 0x164104
4346/* [RW 8] The event id for aggregated interrupt 0 */
4347#define TSDM_REG_AGG_INT_EVENT_0 0x42038
4348#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
4349#define TSDM_REG_AGG_INT_EVENT_2 0x42040
4350#define TSDM_REG_AGG_INT_EVENT_3 0x42044
4351#define TSDM_REG_AGG_INT_EVENT_4 0x42048
4352/* [RW 1] The T bit for aggregated interrupt 0 */
4353#define TSDM_REG_AGG_INT_T_0 0x420b8
4354#define TSDM_REG_AGG_INT_T_1 0x420bc
4355/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
4356#define TSDM_REG_CFC_RSP_START_ADDR 0x42008
4357/* [RW 16] The maximum value of the completion counter #0 */
4358#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c
4359/* [RW 16] The maximum value of the completion counter #1 */
4360#define TSDM_REG_CMP_COUNTER_MAX1 0x42020
4361/* [RW 16] The maximum value of the completion counter #2 */
4362#define TSDM_REG_CMP_COUNTER_MAX2 0x42024
4363/* [RW 16] The maximum value of the completion counter #3 */
4364#define TSDM_REG_CMP_COUNTER_MAX3 0x42028
4365/* [RW 13] The start address in the internal RAM for the completion
4366 counters. */
4367#define TSDM_REG_CMP_COUNTER_START_ADDR 0x4200c
4368#define TSDM_REG_ENABLE_IN1 0x42238
4369#define TSDM_REG_ENABLE_IN2 0x4223c
4370#define TSDM_REG_ENABLE_OUT1 0x42240
4371#define TSDM_REG_ENABLE_OUT2 0x42244
4372/* [RW 4] The initial number of messages that can be sent to the pxp control
4373 interface without receiving any ACK. */
4374#define TSDM_REG_INIT_CREDIT_PXP_CTRL 0x424bc
4375/* [ST 32] The number of ACK after placement messages received */
4376#define TSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x4227c
4377/* [ST 32] The number of packet end messages received from the parser */
4378#define TSDM_REG_NUM_OF_PKT_END_MSG 0x42274
4379/* [ST 32] The number of requests received from the pxp async if */
4380#define TSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x42278
4381/* [ST 32] The number of commands received in queue 0 */
4382#define TSDM_REG_NUM_OF_Q0_CMD 0x42248
4383/* [ST 32] The number of commands received in queue 10 */
4384#define TSDM_REG_NUM_OF_Q10_CMD 0x4226c
4385/* [ST 32] The number of commands received in queue 11 */
4386#define TSDM_REG_NUM_OF_Q11_CMD 0x42270
4387/* [ST 32] The number of commands received in queue 1 */
4388#define TSDM_REG_NUM_OF_Q1_CMD 0x4224c
4389/* [ST 32] The number of commands received in queue 3 */
4390#define TSDM_REG_NUM_OF_Q3_CMD 0x42250
4391/* [ST 32] The number of commands received in queue 4 */
4392#define TSDM_REG_NUM_OF_Q4_CMD 0x42254
4393/* [ST 32] The number of commands received in queue 5 */
4394#define TSDM_REG_NUM_OF_Q5_CMD 0x42258
4395/* [ST 32] The number of commands received in queue 6 */
4396#define TSDM_REG_NUM_OF_Q6_CMD 0x4225c
4397/* [ST 32] The number of commands received in queue 7 */
4398#define TSDM_REG_NUM_OF_Q7_CMD 0x42260
4399/* [ST 32] The number of commands received in queue 8 */
4400#define TSDM_REG_NUM_OF_Q8_CMD 0x42264
4401/* [ST 32] The number of commands received in queue 9 */
4402#define TSDM_REG_NUM_OF_Q9_CMD 0x42268
4403/* [RW 13] The start address in the internal RAM for the packet end message */
4404#define TSDM_REG_PCK_END_MSG_START_ADDR 0x42014
4405/* [RW 13] The start address in the internal RAM for queue counters */
4406#define TSDM_REG_Q_COUNTER_START_ADDR 0x42010
4407/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
4408#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x42548
4409/* [R 1] parser fifo empty in sdm_sync block */
4410#define TSDM_REG_SYNC_PARSER_EMPTY 0x42550
4411/* [R 1] parser serial fifo empty in sdm_sync block */
4412#define TSDM_REG_SYNC_SYNC_EMPTY 0x42558
4413/* [RW 32] Tick for timer counter. Applicable only when
4414 ~tsdm_registers_timer_tick_enable.timer_tick_enable = 1 */
4415#define TSDM_REG_TIMER_TICK 0x42000
4416/* [RW 32] Interrupt mask register #0 read/write */
4417#define TSDM_REG_TSDM_INT_MASK_0 0x4229c
4418#define TSDM_REG_TSDM_INT_MASK_1 0x422ac
4419/* [R 32] Interrupt register #0 read */
4420#define TSDM_REG_TSDM_INT_STS_0 0x42290
4421#define TSDM_REG_TSDM_INT_STS_1 0x422a0
4422/* [RW 11] Parity mask register #0 read/write */
4423#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
4424/* [R 11] Parity register #0 read */
4425#define TSDM_REG_TSDM_PRTY_STS 0x422b0
4426/* [RC 11] Parity register #0 read clear */
4427#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4
4428/* [RW 5] The number of time_slots in the arbitration cycle */
4429#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
4430/* [RW 3] The source that is associated with arbitration element 0. Source
4431 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4432 sleeping thread with priority 1; 4- sleeping thread with priority 2 */
4433#define TSEM_REG_ARB_ELEMENT0 0x180020
4434/* [RW 3] The source that is associated with arbitration element 1. Source
4435 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4436 sleeping thread with priority 1; 4- sleeping thread with priority 2.
4437 Could not be equal to register ~tsem_registers_arb_element0.arb_element0 */
4438#define TSEM_REG_ARB_ELEMENT1 0x180024
4439/* [RW 3] The source that is associated with arbitration element 2. Source
4440 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4441 sleeping thread with priority 1; 4- sleeping thread with priority 2.
4442 Could not be equal to register ~tsem_registers_arb_element0.arb_element0
4443 and ~tsem_registers_arb_element1.arb_element1 */
4444#define TSEM_REG_ARB_ELEMENT2 0x180028
4445/* [RW 3] The source that is associated with arbitration element 3. Source
4446 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4447 sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
4448 not be equal to register ~tsem_registers_arb_element0.arb_element0 and
4449 ~tsem_registers_arb_element1.arb_element1 and
4450 ~tsem_registers_arb_element2.arb_element2 */
4451#define TSEM_REG_ARB_ELEMENT3 0x18002c
4452/* [RW 3] The source that is associated with arbitration element 4. Source
4453 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4454 sleeping thread with priority 1; 4- sleeping thread with priority 2.
4455 Could not be equal to register ~tsem_registers_arb_element0.arb_element0
4456 and ~tsem_registers_arb_element1.arb_element1 and
4457 ~tsem_registers_arb_element2.arb_element2 and
4458 ~tsem_registers_arb_element3.arb_element3 */
4459#define TSEM_REG_ARB_ELEMENT4 0x180030
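/* Editorial sketch: the five ARB_ELEMENT registers above must each carry
 * a distinct source (0 - foc0; 1 - fic1; 2..4 - sleeping threads with
 * priority 0..2), per the "could not be equal" notes. A one-to-one
 * assignment satisfies that by construction; 'bp' and REG_WR() are
 * assumed driver context and the helper is hypothetical.
 */
static inline void tsem_arb_elements_init(struct bnx2x *bp)
{
	static const u32 arb_regs[5] = {
		TSEM_REG_ARB_ELEMENT0, TSEM_REG_ARB_ELEMENT1,
		TSEM_REG_ARB_ELEMENT2, TSEM_REG_ARB_ELEMENT3,
		TSEM_REG_ARB_ELEMENT4,
	};
	int i;

	for (i = 0; i < 5; i++)
		REG_WR(bp, arb_regs[i], i);	/* each source used once */
}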
4460#define TSEM_REG_ENABLE_IN 0x1800a4
4461#define TSEM_REG_ENABLE_OUT 0x1800a8
4462/* [RW 32] This address space contains all registers and memories that are
4463 placed in SEM_FAST block. The SEM_FAST registers are described in
4464 appendix B. In order to access the sem_fast registers the base address
4465 ~fast_memory.fast_memory should be added to each sem_fast register offset. */
4466#define TSEM_REG_FAST_MEMORY 0x1a0000
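/* Editorial sketch: sem_fast registers are reached through the
 * FAST_MEMORY window by adding the window base to the sem_fast-relative
 * offset, exactly as the comment above describes. The macro name is
 * hypothetical.
 */
#define TSEM_FAST_REG(sem_fast_offset)	\
	(TSEM_REG_FAST_MEMORY + (sem_fast_offset))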
4467/* [RW 1] Disables input messages from FIC0. May be updated during run-time
4468 by the microcode. */
4469#define TSEM_REG_FIC0_DISABLE 0x180224
4470/* [RW 1] Disables input messages from FIC1. May be updated during run-time
4471 by the microcode. */
4472#define TSEM_REG_FIC1_DISABLE 0x180234
4473/* [RW 15] Interrupt table. Read and write access to it is not possible in
4474 the middle of the work. */
4475#define TSEM_REG_INT_TABLE 0x180400
4476/* [ST 24] Statistics register. The number of messages that entered through
4477 FIC0 */
4478#define TSEM_REG_MSG_NUM_FIC0 0x180000
4479/* [ST 24] Statistics register. The number of messages that entered through
4480 FIC1 */
4481#define TSEM_REG_MSG_NUM_FIC1 0x180004
4482/* [ST 24] Statistics register. The number of messages that were sent to
4483 FOC0 */
4484#define TSEM_REG_MSG_NUM_FOC0 0x180008
4485/* [ST 24] Statistics register. The number of messages that were sent to
4486 FOC1 */
4487#define TSEM_REG_MSG_NUM_FOC1 0x18000c
4488/* [ST 24] Statistics register. The number of messages that were sent to
4489 FOC2 */
4490#define TSEM_REG_MSG_NUM_FOC2 0x180010
4491/* [ST 24] Statistics register. The number of messages that were sent to
4492 FOC3 */
4493#define TSEM_REG_MSG_NUM_FOC3 0x180014
4494/* [RW 1] Disables input messages from the passive buffer. May be updated
4495 during run-time by the microcode. */
4496#define TSEM_REG_PAS_DISABLE 0x18024c
4497/* [WB 128] Debug only. Passive buffer memory */
4498#define TSEM_REG_PASSIVE_BUFFER 0x181000
4499/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
4500#define TSEM_REG_PRAM 0x1c0000
4501/* [R 8] Valid sleeping threads indication; one bit per thread. */
4502#define TSEM_REG_SLEEP_THREADS_VALID 0x18026c
4503/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
4504#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
4505/* [RW 8] List of free threads. There is a bit per thread. */
4506#define TSEM_REG_THREADS_LIST 0x1802e4
4507/* [RC 32] Parity register #0 read clear */
4508#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118
4509#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128
4510/* [RW 3] The arbitration scheme of time_slot 0 */
4511#define TSEM_REG_TS_0_AS 0x180038
4512/* [RW 3] The arbitration scheme of time_slot 10 */
4513#define TSEM_REG_TS_10_AS 0x180060
4514/* [RW 3] The arbitration scheme of time_slot 11 */
4515#define TSEM_REG_TS_11_AS 0x180064
4516/* [RW 3] The arbitration scheme of time_slot 12 */
4517#define TSEM_REG_TS_12_AS 0x180068
4518/* [RW 3] The arbitration scheme of time_slot 13 */
4519#define TSEM_REG_TS_13_AS 0x18006c
4520/* [RW 3] The arbitration scheme of time_slot 14 */
4521#define TSEM_REG_TS_14_AS 0x180070
4522/* [RW 3] The arbitration scheme of time_slot 15 */
4523#define TSEM_REG_TS_15_AS 0x180074
4524/* [RW 3] The arbitration scheme of time_slot 16 */
4525#define TSEM_REG_TS_16_AS 0x180078
4526/* [RW 3] The arbitration scheme of time_slot 17 */
4527#define TSEM_REG_TS_17_AS 0x18007c
4528/* [RW 3] The arbitration scheme of time_slot 18 */
4529#define TSEM_REG_TS_18_AS 0x180080
4530/* [RW 3] The arbitration scheme of time_slot 1 */
4531#define TSEM_REG_TS_1_AS 0x18003c
4532/* [RW 3] The arbitration scheme of time_slot 2 */
4533#define TSEM_REG_TS_2_AS 0x180040
4534/* [RW 3] The arbitration scheme of time_slot 3 */
4535#define TSEM_REG_TS_3_AS 0x180044
4536/* [RW 3] The arbitration scheme of time_slot 4 */
4537#define TSEM_REG_TS_4_AS 0x180048
4538/* [RW 3] The arbitration scheme of time_slot 5 */
4539#define TSEM_REG_TS_5_AS 0x18004c
4540/* [RW 3] The arbitration scheme of time_slot 6 */
4541#define TSEM_REG_TS_6_AS 0x180050
4542/* [RW 3] The arbitration scheme of time_slot 7 */
4543#define TSEM_REG_TS_7_AS 0x180054
4544/* [RW 3] The arbitration scheme of time_slot 8 */
4545#define TSEM_REG_TS_8_AS 0x180058
4546/* [RW 3] The arbitration scheme of time_slot 9 */
4547#define TSEM_REG_TS_9_AS 0x18005c
4548/* [RW 32] Interrupt mask register #0 read/write */
4549#define TSEM_REG_TSEM_INT_MASK_0 0x180100
4550#define TSEM_REG_TSEM_INT_MASK_1 0x180110
4551/* [R 32] Interrupt register #0 read */
4552#define TSEM_REG_TSEM_INT_STS_0 0x1800f4
4553#define TSEM_REG_TSEM_INT_STS_1 0x180104
4554/* [RW 32] Parity mask register #0 read/write */
4555#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120
4556#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130
4557/* [R 32] Parity register #0 read */
4558#define TSEM_REG_TSEM_PRTY_STS_0 0x180114
4559#define TSEM_REG_TSEM_PRTY_STS_1 0x180124
4560/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit for
4561 * the 64 VFs; values 64-67 reset it for the 4 PFs; values 68-127 are not valid. */
4562#define TSEM_REG_VFPF_ERR_NUM 0x180380
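/* Editorial sketch: value encoding for the *SEM VFPF_ERR_NUM registers
 * per the comment above; 0..63 select a VF error bit and 64..67 a PF
 * one, while 68..127 are invalid. The helper is hypothetical.
 */
static inline u32 sem_vfpf_err_num(int is_pf, u32 id)
{
	return is_pf ? (64 + id) : id;
}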
4563/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
4564 * [10:8] of the address should be the offset within the accessed LCID
4565 * context; the bits [7:0] are the accessed LCID. Example: to write to REG10
4566 * of LCID100; the RBC address should be 12'ha64. */
4567#define UCM_REG_AG_CTX 0xe2000
4568/* [R 5] Used to read the XX protection CAM occupancy counter. */
4569#define UCM_REG_CAM_OCCUP 0xe0170
4570/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
4571 disregarded; valid output is deasserted; all other signals are treated as
4572 usual; if 1 - normal activity. */
4573#define UCM_REG_CDU_AG_RD_IFEN 0xe0038
4574/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
4575 are disregarded; all other signals are treated as usual; if 1 - normal
4576 activity. */
4577#define UCM_REG_CDU_AG_WR_IFEN 0xe0034
4578/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
4579 disregarded; valid output is deasserted; all other signals are treated as
4580 usual; if 1 - normal activity. */
4581#define UCM_REG_CDU_SM_RD_IFEN 0xe0040
4582/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
4583 input is disregarded; all other signals are treated as usual; if 1 -
4584 normal activity. */
4585#define UCM_REG_CDU_SM_WR_IFEN 0xe003c
4586/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
4587 the initial credit value; read returns the current value of the credit
4588 counter. Must be initialized to 1 at start-up. */
4589#define UCM_REG_CFC_INIT_CRD 0xe0204
4590/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
4591 weight 8 (the most prioritised); 1 stands for weight 1 (least
4592 prioritised); 2 stands for weight 2; etc. */
4593#define UCM_REG_CP_WEIGHT 0xe00c4
4594/* [RW 1] Input csem Interface enable. If 0 - the valid input is
4595 disregarded; acknowledge output is deasserted; all other signals are
4596 treated as usual; if 1 - normal activity. */
4597#define UCM_REG_CSEM_IFEN 0xe0028
4598/* [RC 1] Set when the message length mismatch (relative to last indication)
4599 at the csem interface is detected. */
4600#define UCM_REG_CSEM_LENGTH_MIS 0xe0160
4601/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
4602 weight 8 (the most prioritised); 1 stands for weight 1 (least
4603 prioritised); 2 stands for weight 2; etc. */
4604#define UCM_REG_CSEM_WEIGHT 0xe00b8
4605/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
4606 disregarded; acknowledge output is deasserted; all other signals are
4607 treated as usual; if 1 - normal activity. */
4608#define UCM_REG_DORQ_IFEN 0xe0030
4609/* [RC 1] Set when the message length mismatch (relative to last indication)
4610 at the dorq interface is detected. */
4611#define UCM_REG_DORQ_LENGTH_MIS 0xe0168
4612/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
4613 weight 8 (the most prioritised); 1 stands for weight 1 (least
4614 prioritised); 2 stands for weight 2; etc. */
4615#define UCM_REG_DORQ_WEIGHT 0xe00c0
4616/* [RW 8] The Event ID in case ErrorFlg input message bit is set. */
4617#define UCM_REG_ERR_EVNT_ID 0xe00a4
4618/* [RW 28] The CM erroneous header for QM and Timers formatting. */
4619#define UCM_REG_ERR_UCM_HDR 0xe00a0
4620/* [RW 8] The Event ID for Timers expiration. */
4621#define UCM_REG_EXPR_EVNT_ID 0xe00a8
4622/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
4623 writes the initial credit value; read returns the current value of the
4624 credit counter. Must be initialized to 64 at start-up. */
4625#define UCM_REG_FIC0_INIT_CRD 0xe020c
4626/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
4627 writes the initial credit value; read returns the current value of the
4628 credit counter. Must be initialized to 64 at start-up. */
4629#define UCM_REG_FIC1_INIT_CRD 0xe0210
4630/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
4631 - strict priority defined by ~ucm_registers_gr_ag_pr.gr_ag_pr;
4632 ~ucm_registers_gr_ld0_pr.gr_ld0_pr and
4633 ~ucm_registers_gr_ld1_pr.gr_ld1_pr. */
4634#define UCM_REG_GR_ARB_TYPE 0xe0144
4635/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
4636 highest priority is 3. It is supposed that the Store channel group is
4637 the complement of the others. */
4638#define UCM_REG_GR_LD0_PR 0xe014c
4639/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
4640 highest priority is 3. It is supposed that the Store channel group is
4641 the complement of the others. */
4642#define UCM_REG_GR_LD1_PR 0xe0150
4643/* [RW 2] The queue index for invalidate counter flag decision. */
4644#define UCM_REG_INV_CFLG_Q 0xe00e4
4645/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
4646 sent to STORM; for a specific connection type. The double REG-pairs are
4647 used in order to align to STORM context row size of 128 bits. The offset
4648 of these data in the STORM context is always 0. Index _i stands for the
4649 connection type (one of 16). */
4650#define UCM_REG_N_SM_CTX_LD_0 0xe0054
4651#define UCM_REG_N_SM_CTX_LD_1 0xe0058
4652#define UCM_REG_N_SM_CTX_LD_2 0xe005c
4653#define UCM_REG_N_SM_CTX_LD_3 0xe0060
4654#define UCM_REG_N_SM_CTX_LD_4 0xe0064
4655#define UCM_REG_N_SM_CTX_LD_5 0xe0068
4656#define UCM_REG_PHYS_QNUM0_0 0xe0110
4657#define UCM_REG_PHYS_QNUM0_1 0xe0114
4658#define UCM_REG_PHYS_QNUM1_0 0xe0118
4659#define UCM_REG_PHYS_QNUM1_1 0xe011c
4660#define UCM_REG_PHYS_QNUM2_0 0xe0120
4661#define UCM_REG_PHYS_QNUM2_1 0xe0124
4662#define UCM_REG_PHYS_QNUM3_0 0xe0128
4663#define UCM_REG_PHYS_QNUM3_1 0xe012c
4664/* [RW 8] The Event ID for Timers formatting in case of stop done. */
4665#define UCM_REG_STOP_EVNT_ID 0xe00ac
4666/* [RC 1] Set when the message length mismatch (relative to last indication)
4667 at the STORM interface is detected. */
4668#define UCM_REG_STORM_LENGTH_MIS 0xe0154
4669/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
4670 disregarded; acknowledge output is deasserted; all other signals are
4671 treated as usual; if 1 - normal activity. */
4672#define UCM_REG_STORM_UCM_IFEN 0xe0010
4673/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
4674 weight 8 (the most prioritised); 1 stands for weight 1 (least
4675 prioritised); 2 stands for weight 2; etc. */
4676#define UCM_REG_STORM_WEIGHT 0xe00b0
4677/* [RW 4] Timers output initial credit. Max credit available - 15. Write
4678 writes the initial credit value; read returns the current value of the
4679 credit counter. Must be initialized to 4 at start-up. */
4680#define UCM_REG_TM_INIT_CRD 0xe021c
4681/* [RW 28] The CM header for Timers expiration command. */
4682#define UCM_REG_TM_UCM_HDR 0xe009c
4683/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
4684 disregarded; acknowledge output is deasserted; all other signals are
4685 treated as usual; if 1 - normal activity. */
4686#define UCM_REG_TM_UCM_IFEN 0xe001c
4687/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
4688 weight 8 (the most prioritised); 1 stands for weight 1 (least
4689 prioritised); 2 stands for weight 2; etc. */
4690#define UCM_REG_TM_WEIGHT 0xe00d4
4691/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
4692 disregarded; acknowledge output is deasserted; all other signals are
4693 treated as usual; if 1 - normal activity. */
4694#define UCM_REG_TSEM_IFEN 0xe0024
4695/* [RC 1] Set when the message length mismatch (relative to last indication)
4696 at the tsem interface is detected. */
4697#define UCM_REG_TSEM_LENGTH_MIS 0xe015c
4698/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
4699 weight 8 (the most prioritised); 1 stands for weight 1 (least
4700 prioritised); 2 stands for weight 2; etc. */
4701#define UCM_REG_TSEM_WEIGHT 0xe00b4
4702/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
4703 acknowledge output is deasserted; all other signals are treated as usual;
4704 if 1 - normal activity. */
4705#define UCM_REG_UCM_CFC_IFEN 0xe0044
4706/* [RW 11] Interrupt mask register #0 read/write */
4707#define UCM_REG_UCM_INT_MASK 0xe01d4
4708/* [R 11] Interrupt register #0 read */
4709#define UCM_REG_UCM_INT_STS 0xe01c8
4710/* [RW 27] Parity mask register #0 read/write */
4711#define UCM_REG_UCM_PRTY_MASK 0xe01e4
4712/* [R 27] Parity register #0 read */
4713#define UCM_REG_UCM_PRTY_STS 0xe01d8
4714/* [RC 27] Parity register #0 read clear */
4715#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
4716/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
4717 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
4718 Is used to determine the number of the AG context REG-pairs written back;
4719 when the Reg1WbFlg isn't set. */
4720#define UCM_REG_UCM_REG0_SZ 0xe00dc
4721/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
4722 disregarded; valid is deasserted; all other signals are treated as usual;
4723 if 1 - normal activity. */
4724#define UCM_REG_UCM_STORM0_IFEN 0xe0004
4725/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
4726 disregarded; valid is deasserted; all other signals are treated as usual;
4727 if 1 - normal activity. */
4728#define UCM_REG_UCM_STORM1_IFEN 0xe0008
4729/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
4730 disregarded; acknowledge output is deasserted; all other signals are
4731 treated as usual; if 1 - normal activity. */
4732#define UCM_REG_UCM_TM_IFEN 0xe0020
4733/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
4734 disregarded; valid is deasserted; all other signals are treated as usual;
4735 if 1 - normal activity. */
4736#define UCM_REG_UCM_UQM_IFEN 0xe000c
4737/* [RW 1] If set; the Q index received from the QM is inserted to event ID. */
4738#define UCM_REG_UCM_UQM_USE_Q 0xe00d8
4739/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
4740 the initial credit value; read returns the current value of the credit
4741 counter. Must be initialized to 32 at start-up. */
4742#define UCM_REG_UQM_INIT_CRD 0xe0220
4743/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
4744 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
4745 prioritised); 2 stands for weight 2; etc. */
4746#define UCM_REG_UQM_P_WEIGHT 0xe00cc
4747/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
4748 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
4749 prioritised); 2 stands for weight 2; etc. */
4750#define UCM_REG_UQM_S_WEIGHT 0xe00d0
4751/* [RW 28] The CM header value for QM request (primary). */
4752#define UCM_REG_UQM_UCM_HDR_P 0xe0094
4753/* [RW 28] The CM header value for QM request (secondary). */
4754#define UCM_REG_UQM_UCM_HDR_S 0xe0098
4755/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
4756 acknowledge output is deasserted; all other signals are treated as usual;
4757 if 1 - normal activity. */
4758#define UCM_REG_UQM_UCM_IFEN 0xe0014
4759/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
4760 acknowledge output is deasserted; all other signals are treated as usual;
4761 if 1 - normal activity. */
4762#define UCM_REG_USDM_IFEN 0xe0018
4763/* [RC 1] Set when the message length mismatch (relative to last indication)
4764 at the SDM interface is detected. */
4765#define UCM_REG_USDM_LENGTH_MIS 0xe0158
4766/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
4767 weight 8 (the most prioritised); 1 stands for weight 1 (least
4768 prioritised); 2 stands for weight 2; etc. */
4769#define UCM_REG_USDM_WEIGHT 0xe00c8
4770/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
4771 disregarded; acknowledge output is deasserted; all other signals are
4772 treated as usual; if 1 - normal activity. */
4773#define UCM_REG_XSEM_IFEN 0xe002c
4774/* [RC 1] Set when the message length mismatch (relative to last indication)
4775 at the xsem interface is detected. */
4776#define UCM_REG_XSEM_LENGTH_MIS 0xe0164
4777/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
4778 weight 8 (the most prioritised); 1 stands for weight 1 (least
4779 prioritised); 2 stands for weight 2; etc. */
4780#define UCM_REG_XSEM_WEIGHT 0xe00bc
4781/* [RW 20] Indirect access to the descriptor table of the XX protection
4782 mechanism. The fields are: [5:0] - message length; [14:6] - message
4783 pointer; [19:15] - next pointer. */
4784#define UCM_REG_XX_DESCR_TABLE 0xe0280
4785#define UCM_REG_XX_DESCR_TABLE_SIZE 27
4786/* [R 6] Used to read the XX protection Free counter. */
4787#define UCM_REG_XX_FREE 0xe016c
4788/* [RW 6] Initial value for the credit counter; responsible for filling
4789 the Input Stage XX protection buffer with the XX protection pending
4790 messages. Write writes the initial credit value; read returns the current
4791 value of the credit counter. Must be initialized to 12 at start-up. */
4792#define UCM_REG_XX_INIT_CRD 0xe0224
4793/* [RW 6] The maximum number of pending messages; which may be stored in XX
4794 protection. ~ucm_registers_xx_free.xx_free is read on read. */
4795#define UCM_REG_XX_MSG_NUM 0xe0228
4796/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
4797#define UCM_REG_XX_OVFL_EVNT_ID 0xe004c
4798/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
4799 The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
4800 header pointer. */
4801#define UCM_REG_XX_TABLE 0xe0300
4802#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28)
4803#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15)
4804#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24)
4805#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5)
4806#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8)
4807#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4)
4808#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1)
4809#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13)
4810#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0)
4811#define UMAC_REG_COMMAND_CONFIG 0x8
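/* Editorial sketch: COMMAND_CONFIG gathers the single-bit controls
 * defined above, so enabling the MAC is a read-modify-write of one
 * register. 'bp', 'umac_base' (the per-port UMAC GRC base), REG_RD()
 * and REG_WR() are assumed driver context, not defined here.
 */
static inline void umac_enable_rx_tx(struct bnx2x *bp, u32 umac_base)
{
	u32 val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);

	val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;	/* leave soft reset */
	val |= UMAC_COMMAND_CONFIG_REG_RX_ENA |
	       UMAC_COMMAND_CONFIG_REG_TX_ENA;
	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
}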
4812/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers
4813 * to bit 17 of the MAC address etc. */
4814#define UMAC_REG_MAC_ADDR0 0xc
4815/* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1
4816 * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. */
4817#define UMAC_REG_MAC_ADDR1 0x10
4818/* [RW 14] Defines a 14-bit maximum frame length used by the MAC receive
4819 * logic to check frames. */
4820#define UMAC_REG_MAXFR 0x14
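/* Editorial sketch: per the two comments above, MAC_ADDR0 holds MAC
 * address bits [47:16] and MAC_ADDR1 bits [15:0]. Assuming MAC bit 47
 * maps to the first octet of the canonical address, the first four
 * bytes go in ADDR0 (most significant on top) and the last two in
 * ADDR1. The helper is hypothetical; 'bp', 'umac_base' and REG_WR()
 * are assumed driver context.
 */
static inline void umac_set_mac_addr(struct bnx2x *bp, u32 umac_base,
				     const u8 *mac)
{
	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
	       ((u32)mac[0] << 24) | ((u32)mac[1] << 16) |
	       ((u32)mac[2] << 8) | mac[3]);
	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
	       ((u32)mac[4] << 8) | mac[5]);
}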
4821/* [RW 8] The event id for aggregated interrupt 0 */
4822#define USDM_REG_AGG_INT_EVENT_0 0xc4038
4823#define USDM_REG_AGG_INT_EVENT_1 0xc403c
4824#define USDM_REG_AGG_INT_EVENT_2 0xc4040
4825#define USDM_REG_AGG_INT_EVENT_4 0xc4048
4826#define USDM_REG_AGG_INT_EVENT_5 0xc404c
4827#define USDM_REG_AGG_INT_EVENT_6 0xc4050
4828/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
4829 or auto-mask-mode (1) */
4830#define USDM_REG_AGG_INT_MODE_0 0xc41b8
4831#define USDM_REG_AGG_INT_MODE_1 0xc41bc
4832#define USDM_REG_AGG_INT_MODE_4 0xc41c8
4833#define USDM_REG_AGG_INT_MODE_5 0xc41cc
4834#define USDM_REG_AGG_INT_MODE_6 0xc41d0
4835/* [RW 1] The T bit for aggregated interrupt 5 */
4836#define USDM_REG_AGG_INT_T_5 0xc40cc
4837#define USDM_REG_AGG_INT_T_6 0xc40d0
4838/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
4839#define USDM_REG_CFC_RSP_START_ADDR 0xc4008
4840/* [RW 16] The maximum value of the completion counter #0 */
4841#define USDM_REG_CMP_COUNTER_MAX0 0xc401c
4842/* [RW 16] The maximum value of the completion counter #1 */
4843#define USDM_REG_CMP_COUNTER_MAX1 0xc4020
4844/* [RW 16] The maximum value of the completion counter #2 */
4845#define USDM_REG_CMP_COUNTER_MAX2 0xc4024
4846/* [RW 16] The maximum value of the completion counter #3 */
4847#define USDM_REG_CMP_COUNTER_MAX3 0xc4028
4848/* [RW 13] The start address in the internal RAM for the completion
4849 counters. */
4850#define USDM_REG_CMP_COUNTER_START_ADDR 0xc400c
4851#define USDM_REG_ENABLE_IN1 0xc4238
4852#define USDM_REG_ENABLE_IN2 0xc423c
4853#define USDM_REG_ENABLE_OUT1 0xc4240
4854#define USDM_REG_ENABLE_OUT2 0xc4244
4855/* [RW 4] The initial number of messages that can be sent to the pxp control
4856 interface without receiving any ACK. */
4857#define USDM_REG_INIT_CREDIT_PXP_CTRL 0xc44c0
4858/* [ST 32] The number of ACK after placement messages received */
4859#define USDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc4280
4860/* [ST 32] The number of packet end messages received from the parser */
4861#define USDM_REG_NUM_OF_PKT_END_MSG 0xc4278
4862/* [ST 32] The number of requests received from the pxp async if */
4863#define USDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc427c
4864/* [ST 32] The number of commands received in queue 0 */
4865#define USDM_REG_NUM_OF_Q0_CMD 0xc4248
4866/* [ST 32] The number of commands received in queue 10 */
4867#define USDM_REG_NUM_OF_Q10_CMD 0xc4270
4868/* [ST 32] The number of commands received in queue 11 */
4869#define USDM_REG_NUM_OF_Q11_CMD 0xc4274
4870/* [ST 32] The number of commands received in queue 1 */
4871#define USDM_REG_NUM_OF_Q1_CMD 0xc424c
4872/* [ST 32] The number of commands received in queue 2 */
4873#define USDM_REG_NUM_OF_Q2_CMD 0xc4250
4874/* [ST 32] The number of commands received in queue 3 */
4875#define USDM_REG_NUM_OF_Q3_CMD 0xc4254
4876/* [ST 32] The number of commands received in queue 4 */
4877#define USDM_REG_NUM_OF_Q4_CMD 0xc4258
4878/* [ST 32] The number of commands received in queue 5 */
4879#define USDM_REG_NUM_OF_Q5_CMD 0xc425c
4880/* [ST 32] The number of commands received in queue 6 */
4881#define USDM_REG_NUM_OF_Q6_CMD 0xc4260
4882/* [ST 32] The number of commands received in queue 7 */
4883#define USDM_REG_NUM_OF_Q7_CMD 0xc4264
4884/* [ST 32] The number of commands received in queue 8 */
4885#define USDM_REG_NUM_OF_Q8_CMD 0xc4268
4886/* [ST 32] The number of commands received in queue 9 */
4887#define USDM_REG_NUM_OF_Q9_CMD 0xc426c
4888/* [RW 13] The start address in the internal RAM for the packet end message */
4889#define USDM_REG_PCK_END_MSG_START_ADDR 0xc4014
4890/* [RW 13] The start address in the internal RAM for queue counters */
4891#define USDM_REG_Q_COUNTER_START_ADDR 0xc4010
4892/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
4893#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc4550
4894/* [R 1] parser fifo empty in sdm_sync block */
4895#define USDM_REG_SYNC_PARSER_EMPTY 0xc4558
4896/* [R 1] parser serial fifo empty in sdm_sync block */
4897#define USDM_REG_SYNC_SYNC_EMPTY 0xc4560
4898/* [RW 32] Tick for timer counter. Applicable only when
4899 ~usdm_registers_timer_tick_enable.timer_tick_enable = 1 */
4900#define USDM_REG_TIMER_TICK 0xc4000
4901/* [RW 32] Interrupt mask register #0 read/write */
4902#define USDM_REG_USDM_INT_MASK_0 0xc42a0
4903#define USDM_REG_USDM_INT_MASK_1 0xc42b0
4904/* [R 32] Interrupt register #0 read */
4905#define USDM_REG_USDM_INT_STS_0 0xc4294
4906#define USDM_REG_USDM_INT_STS_1 0xc42a4
4907/* [RW 11] Parity mask register #0 read/write */
4908#define USDM_REG_USDM_PRTY_MASK 0xc42c0
4909/* [R 11] Parity register #0 read */
4910#define USDM_REG_USDM_PRTY_STS 0xc42b4
4911/* [RC 11] Parity register #0 read clear */
4912#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8
4913/* [RW 5] The number of time_slots in the arbitration cycle */
4914#define USEM_REG_ARB_CYCLE_SIZE 0x300034
4915/* [RW 3] The source that is associated with arbitration element 0. Source
4916 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4917 sleeping thread with priority 1; 4- sleeping thread with priority 2 */
4918#define USEM_REG_ARB_ELEMENT0 0x300020
4919/* [RW 3] The source that is associated with arbitration element 1. Source
4920 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4921 sleeping thread with priority 1; 4- sleeping thread with priority 2.
4922 Could not be equal to register ~usem_registers_arb_element0.arb_element0 */
4923#define USEM_REG_ARB_ELEMENT1 0x300024
4924/* [RW 3] The source that is associated with arbitration element 2. Source
4925 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4926 sleeping thread with priority 1; 4- sleeping thread with priority 2.
4927 Could not be equal to register ~usem_registers_arb_element0.arb_element0
4928 and ~usem_registers_arb_element1.arb_element1 */
4929#define USEM_REG_ARB_ELEMENT2 0x300028
4930/* [RW 3] The source that is associated with arbitration element 3. Source
4931 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4932 sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
4933 not be equal to register ~usem_registers_arb_element0.arb_element0 and
4934 ~usem_registers_arb_element1.arb_element1 and
4935 ~usem_registers_arb_element2.arb_element2 */
4936#define USEM_REG_ARB_ELEMENT3 0x30002c
4937/* [RW 3] The source that is associated with arbitration element 4. Source
4938 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4939 sleeping thread with priority 1; 4- sleeping thread with priority 2.
4940 Could not be equal to register ~usem_registers_arb_element0.arb_element0
4941 and ~usem_registers_arb_element1.arb_element1 and
4942 ~usem_registers_arb_element2.arb_element2 and
4943 ~usem_registers_arb_element3.arb_element3 */
4944#define USEM_REG_ARB_ELEMENT4 0x300030
4945#define USEM_REG_ENABLE_IN 0x3000a4
4946#define USEM_REG_ENABLE_OUT 0x3000a8
4947/* [RW 32] This address space contains all registers and memories that are
4948 placed in SEM_FAST block. The SEM_FAST registers are described in
4949 appendix B. In order to access the sem_fast registers the base address
4950 ~fast_memory.fast_memory should be added to each sem_fast register offset. */
4951#define USEM_REG_FAST_MEMORY 0x320000
4952/* [RW 1] Disables input messages from FIC0. May be updated during run-time
4953 by the microcode. */
4954#define USEM_REG_FIC0_DISABLE 0x300224
4955/* [RW 1] Disables input messages from FIC1. May be updated during run-time
4956 by the microcode. */
4957#define USEM_REG_FIC1_DISABLE 0x300234
4958/* [RW 15] Interrupt table. Read and write access to it is not possible in
4959 the middle of the work. */
4960#define USEM_REG_INT_TABLE 0x300400
4961/* [ST 24] Statistics register. The number of messages that entered through
4962 FIC0 */
4963#define USEM_REG_MSG_NUM_FIC0 0x300000
4964/* [ST 24] Statistics register. The number of messages that entered through
4965 FIC1 */
4966#define USEM_REG_MSG_NUM_FIC1 0x300004
4967/* [ST 24] Statistics register. The number of messages that were sent to
4968 FOC0 */
4969#define USEM_REG_MSG_NUM_FOC0 0x300008
4970/* [ST 24] Statistics register. The number of messages that were sent to
4971 FOC1 */
4972#define USEM_REG_MSG_NUM_FOC1 0x30000c
4973/* [ST 24] Statistics register. The number of messages that were sent to
4974 FOC2 */
4975#define USEM_REG_MSG_NUM_FOC2 0x300010
4976/* [ST 24] Statistics register. The number of messages that were sent to
4977 FOC3 */
4978#define USEM_REG_MSG_NUM_FOC3 0x300014
4979/* [RW 1] Disables input messages from the passive buffer. May be updated
4980 during run-time by the microcode. */
4981#define USEM_REG_PAS_DISABLE 0x30024c
4982/* [WB 128] Debug only. Passive buffer memory */
4983#define USEM_REG_PASSIVE_BUFFER 0x302000
4984/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
4985#define USEM_REG_PRAM 0x340000
4986/* [R 16] Valid sleeping threads indication; one bit per thread. */
4987#define USEM_REG_SLEEP_THREADS_VALID 0x30026c
4988/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
4989#define USEM_REG_SLOW_EXT_STORE_EMPTY 0x3002a0
4990/* [RW 16] List of free threads. There is a bit per thread. */
4991#define USEM_REG_THREADS_LIST 0x3002e4
4992/* [RW 3] The arbitration scheme of time_slot 0 */
4993#define USEM_REG_TS_0_AS 0x300038
4994/* [RW 3] The arbitration scheme of time_slot 10 */
4995#define USEM_REG_TS_10_AS 0x300060
4996/* [RW 3] The arbitration scheme of time_slot 11 */
4997#define USEM_REG_TS_11_AS 0x300064
4998/* [RW 3] The arbitration scheme of time_slot 12 */
4999#define USEM_REG_TS_12_AS 0x300068
5000/* [RW 3] The arbitration scheme of time_slot 13 */
5001#define USEM_REG_TS_13_AS 0x30006c
5002/* [RW 3] The arbitration scheme of time_slot 14 */
5003#define USEM_REG_TS_14_AS 0x300070
5004/* [RW 3] The arbitration scheme of time_slot 15 */
5005#define USEM_REG_TS_15_AS 0x300074
5006/* [RW 3] The arbitration scheme of time_slot 16 */
5007#define USEM_REG_TS_16_AS 0x300078
5008/* [RW 3] The arbitration scheme of time_slot 17 */
5009#define USEM_REG_TS_17_AS 0x30007c
5010/* [RW 3] The arbitration scheme of time_slot 18 */
5011#define USEM_REG_TS_18_AS 0x300080
5012/* [RW 3] The arbitration scheme of time_slot 1 */
5013#define USEM_REG_TS_1_AS 0x30003c
5014/* [RW 3] The arbitration scheme of time_slot 2 */
5015#define USEM_REG_TS_2_AS 0x300040
5016/* [RW 3] The arbitration scheme of time_slot 3 */
5017#define USEM_REG_TS_3_AS 0x300044
5018/* [RW 3] The arbitration scheme of time_slot 4 */
5019#define USEM_REG_TS_4_AS 0x300048
5020/* [RW 3] The arbitration scheme of time_slot 5 */
5021#define USEM_REG_TS_5_AS 0x30004c
5022/* [RW 3] The arbitration scheme of time_slot 6 */
5023#define USEM_REG_TS_6_AS 0x300050
5024/* [RW 3] The arbitration scheme of time_slot 7 */
5025#define USEM_REG_TS_7_AS 0x300054
5026/* [RW 3] The arbitration scheme of time_slot 8 */
5027#define USEM_REG_TS_8_AS 0x300058
5028/* [RW 3] The arbitration scheme of time_slot 9 */
5029#define USEM_REG_TS_9_AS 0x30005c
5030/* [RW 32] Interrupt mask register #0 read/write */
5031#define USEM_REG_USEM_INT_MASK_0 0x300110
5032#define USEM_REG_USEM_INT_MASK_1 0x300120
5033/* [R 32] Interrupt register #0 read */
5034#define USEM_REG_USEM_INT_STS_0 0x300104
5035#define USEM_REG_USEM_INT_STS_1 0x300114
5036/* [RW 32] Parity mask register #0 read/write */
5037#define USEM_REG_USEM_PRTY_MASK_0 0x300130
5038#define USEM_REG_USEM_PRTY_MASK_1 0x300140
5039/* [R 32] Parity register #0 read */
5040#define USEM_REG_USEM_PRTY_STS_0 0x300124
5041#define USEM_REG_USEM_PRTY_STS_1 0x300134
5042/* [RC 32] Parity register #0 read clear */
5043#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128
5044#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138
5045/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit for
5046 * the 64 VFs; values 64-67 reset it for the 4 PFs; values 68-127 are not valid. */
5047#define USEM_REG_VFPF_ERR_NUM 0x300380
5048#define VFC_MEMORIES_RST_REG_CAM_RST (0x1<<0)
5049#define VFC_MEMORIES_RST_REG_RAM_RST (0x1<<1)
5050#define VFC_REG_MEMORIES_RST 0x1943c
5051/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
5052 * [12:8] of the address should be the offset within the accessed LCID
5053 * context; the bits [7:0] are the accessed LCID. Example: to write to REG10
5054 * of LCID100; the RBC address should be 13'ha64. */
5055#define XCM_REG_AG_CTX 0x28000
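/* Editorial sketch: the indirect index documented above packs the
 * 32-bit-word offset within the LCID's AG context into bits [12:8] and
 * the LCID into bits [7:0]; the stated example checks out, since
 * (10 << 8) | 100 == 0xa64 for REG10 of LCID100. How this 13-bit index
 * scales onto the XCM_REG_AG_CTX window (word vs. byte addressing) is
 * left to the hardware documentation.
 */
static inline u32 xcm_ag_ctx_index(u32 reg_offset, u32 lcid)
{
	return ((reg_offset & 0x1f) << 8) | (lcid & 0xff);
}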
5056/* [RW 2] The queue index for registration on Aux1 counter flag. */
5057#define XCM_REG_AUX1_Q 0x20134
5058/* [RW 2] Per each decision rule the queue index to register to. */
5059#define XCM_REG_AUX_CNT_FLG_Q_19 0x201b0
5060/* [R 5] Used to read the XX protection CAM occupancy counter. */
5061#define XCM_REG_CAM_OCCUP 0x20244
5062/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
5063 disregarded; valid output is deasserted; all other signals are treated as
5064 usual; if 1 - normal activity. */
5065#define XCM_REG_CDU_AG_RD_IFEN 0x20044
5066/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
5067 are disregarded; all other signals are treated as usual; if 1 - normal
5068 activity. */
5069#define XCM_REG_CDU_AG_WR_IFEN 0x20040
5070/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
5071 disregarded; valid output is deasserted; all other signals are treated as
5072 usual; if 1 - normal activity. */
5073#define XCM_REG_CDU_SM_RD_IFEN 0x2004c
5074/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
5075 input is disregarded; all other signals are treated as usual; if 1 -
5076 normal activity. */
5077#define XCM_REG_CDU_SM_WR_IFEN 0x20048
5078/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
5079 the initial credit value; read returns the current value of the credit
5080 counter. Must be initialized to 1 at start-up. */
5081#define XCM_REG_CFC_INIT_CRD 0x20404
5082/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
5083 weight 8 (the most prioritised); 1 stands for weight 1 (least
5084 prioritised); 2 stands for weight 2; etc. */
5085#define XCM_REG_CP_WEIGHT 0x200dc
5086/* [RW 1] Input csem Interface enable. If 0 - the valid input is
5087 disregarded; acknowledge output is deasserted; all other signals are
5088 treated as usual; if 1 - normal activity. */
5089#define XCM_REG_CSEM_IFEN 0x20028
5090/* [RC 1] Set at message length mismatch (relative to last indication) at
5091 the csem interface. */
5092#define XCM_REG_CSEM_LENGTH_MIS 0x20228
5093/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
5094 weight 8 (the most prioritised); 1 stands for weight 1 (least
5095 prioritised); 2 stands for weight 2; etc. */
5096#define XCM_REG_CSEM_WEIGHT 0x200c4
5097/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
5098 disregarded; acknowledge output is deasserted; all other signals are
5099 treated as usual; if 1 - normal activity. */
5100#define XCM_REG_DORQ_IFEN 0x20030
5101/* [RC 1] Set at message length mismatch (relative to last indication) at
5102 the dorq interface. */
5103#define XCM_REG_DORQ_LENGTH_MIS 0x20230
5104/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
5105 weight 8 (the most prioritised); 1 stands for weight 1 (least
5106 prioritised); 2 stands for weight 2; etc. */
5107#define XCM_REG_DORQ_WEIGHT 0x200cc
5108/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
5109#define XCM_REG_ERR_EVNT_ID 0x200b0
5110/* [RW 28] The CM erroneous header for QM and Timers formatting. */
5111#define XCM_REG_ERR_XCM_HDR 0x200ac
5112/* [RW 8] The Event ID for Timers expiration. */
5113#define XCM_REG_EXPR_EVNT_ID 0x200b4
5114/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
5115 writes the initial credit value; read returns the current value of the
5116 credit counter. Must be initialized to 64 at start-up. */
5117#define XCM_REG_FIC0_INIT_CRD 0x2040c
5118/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
5119 writes the initial credit value; read returns the current value of the
5120 credit counter. Must be initialized to 64 at start-up. */
5121#define XCM_REG_FIC1_INIT_CRD 0x20410
5122#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0 0x20118
5123#define XCM_REG_GLB_DEL_ACK_MAX_CNT_1 0x2011c
5124#define XCM_REG_GLB_DEL_ACK_TMR_VAL_0 0x20108
5125#define XCM_REG_GLB_DEL_ACK_TMR_VAL_1 0x2010c
5126/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
5127 - strict priority defined by ~xcm_registers_gr_ag_pr.gr_ag_pr;
5128 ~xcm_registers_gr_ld0_pr.gr_ld0_pr and
5129 ~xcm_registers_gr_ld1_pr.gr_ld1_pr. */
5130#define XCM_REG_GR_ARB_TYPE 0x2020c
5131/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
5132 highest priority is 3. It is supposed that the Channel group is the
5133 complement of the other 3 groups. */
5134#define XCM_REG_GR_LD0_PR 0x20214
5135/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
5136 highest priority is 3. It is supposed that the Channel group is the
5137 complement of the other 3 groups. */
5138#define XCM_REG_GR_LD1_PR 0x20218
5139/* [RW 1] Input nig0 Interface enable. If 0 - the valid input is
5140 disregarded; acknowledge output is deasserted; all other signals are
5141 treated as usual; if 1 - normal activity. */
5142#define XCM_REG_NIG0_IFEN 0x20038
5143/* [RC 1] Set at message length mismatch (relative to last indication) at
5144 the nig0 interface. */
5145#define XCM_REG_NIG0_LENGTH_MIS 0x20238
5146/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for
5147 weight 8 (the most prioritised); 1 stands for weight 1 (least
5148 prioritised); 2 stands for weight 2; etc. */
5149#define XCM_REG_NIG0_WEIGHT 0x200d4
5150/* [RW 1] Input nig1 Interface enable. If 0 - the valid input is
5151 disregarded; acknowledge output is deasserted; all other signals are
5152 treated as usual; if 1 - normal activity. */
5153#define XCM_REG_NIG1_IFEN 0x2003c
5154/* [RC 1] Set at message length mismatch (relative to last indication) at
5155 the nig1 interface. */
5156#define XCM_REG_NIG1_LENGTH_MIS 0x2023c
5157/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
5158 sent to STORM; for a specific connection type. The double REG-pairs are
5159 used in order to align to STORM context row size of 128 bits. The offset
5160 of these data in the STORM context is always 0. Index _i stands for the
5161 connection type (one of 16). */
5162#define XCM_REG_N_SM_CTX_LD_0 0x20060
5163#define XCM_REG_N_SM_CTX_LD_1 0x20064
5164#define XCM_REG_N_SM_CTX_LD_2 0x20068
5165#define XCM_REG_N_SM_CTX_LD_3 0x2006c
5166#define XCM_REG_N_SM_CTX_LD_4 0x20070
5167#define XCM_REG_N_SM_CTX_LD_5 0x20074
5168/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
5169 acknowledge output is deasserted; all other signals are treated as usual;
5170 if 1 - normal activity. */
5171#define XCM_REG_PBF_IFEN 0x20034
5172/* [RC 1] Set at message length mismatch (relative to last indication) at
5173 the pbf interface. */
5174#define XCM_REG_PBF_LENGTH_MIS 0x20234
5175/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
5176 weight 8 (the most prioritised); 1 stands for weight 1 (least
5177 prioritised); 2 stands for weight 2; etc. */
5178#define XCM_REG_PBF_WEIGHT 0x200d0
5179#define XCM_REG_PHYS_QNUM3_0 0x20100
5180#define XCM_REG_PHYS_QNUM3_1 0x20104
5181/* [RW 8] The Event ID for Timers formatting in case of stop done. */
5182#define XCM_REG_STOP_EVNT_ID 0x200b8
5183/* [RC 1] Set at message length mismatch (relative to last indication) at
5184 the STORM interface. */
5185#define XCM_REG_STORM_LENGTH_MIS 0x2021c
5186/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
5187 weight 8 (the most prioritised); 1 stands for weight 1 (least
5188 prioritised); 2 stands for weight 2; etc. */
5189#define XCM_REG_STORM_WEIGHT 0x200bc
5190/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
5191 disregarded; acknowledge output is deasserted; all other signals are
5192 treated as usual; if 1 - normal activity. */
5193#define XCM_REG_STORM_XCM_IFEN 0x20010
5194/* [RW 4] Timers output initial credit. Max credit available - 15. Write
5195 writes the initial credit value; read returns the current value of the
5196 credit counter. Must be initialized to 4 at start-up. */
5197#define XCM_REG_TM_INIT_CRD 0x2041c
5198/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
5199 weight 8 (the most prioritised); 1 stands for weight 1 (least
5200 prioritised); 2 stands for weight 2; etc. */
5201#define XCM_REG_TM_WEIGHT 0x200ec
5202/* [RW 28] The CM header for Timers expiration command. */
5203#define XCM_REG_TM_XCM_HDR 0x200a8
5204/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
5205 disregarded; acknowledge output is deasserted; all other signals are
5206 treated as usual; if 1 - normal activity. */
5207#define XCM_REG_TM_XCM_IFEN 0x2001c
5208/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
5209 disregarded; acknowledge output is deasserted; all other signals are
5210 treated as usual; if 1 - normal activity. */
5211#define XCM_REG_TSEM_IFEN 0x20024
5212/* [RC 1] Set at message length mismatch (relative to last indication) at
5213 the tsem interface. */
5214#define XCM_REG_TSEM_LENGTH_MIS 0x20224
5215/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
5216 weight 8 (the most prioritised); 1 stands for weight 1 (least
5217 prioritised); 2 stands for weight 2; etc. */
5218#define XCM_REG_TSEM_WEIGHT 0x200c0
5219/* [RW 2] The queue index for registration on UNA greater NXT decision rule. */
5220#define XCM_REG_UNA_GT_NXT_Q 0x20120
5221/* [RW 1] Input usem Interface enable. If 0 - the valid input is
5222 disregarded; acknowledge output is deasserted; all other signals are
5223 treated as usual; if 1 - normal activity. */
5224#define XCM_REG_USEM_IFEN 0x2002c
5225/* [RC 1] Message length mismatch (relative to last indication) at the usem
5226 interface. */
5227#define XCM_REG_USEM_LENGTH_MIS 0x2022c
5228/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
5229 weight 8 (the most prioritised); 1 stands for weight 1 (least
5230 prioritised); 2 stands for weight 2; etc. */
5231#define XCM_REG_USEM_WEIGHT 0x200c8
5232#define XCM_REG_WU_DA_CNT_CMD00 0x201d4
5233#define XCM_REG_WU_DA_CNT_CMD01 0x201d8
5234#define XCM_REG_WU_DA_CNT_CMD10 0x201dc
5235#define XCM_REG_WU_DA_CNT_CMD11 0x201e0
5236#define XCM_REG_WU_DA_CNT_UPD_VAL00 0x201e4
5237#define XCM_REG_WU_DA_CNT_UPD_VAL01 0x201e8
5238#define XCM_REG_WU_DA_CNT_UPD_VAL10 0x201ec
5239#define XCM_REG_WU_DA_CNT_UPD_VAL11 0x201f0
5240#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00 0x201c4
5241#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01 0x201c8
5242#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10 0x201cc
5243#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11 0x201d0
5244/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
5245 acknowledge output is deasserted; all other signals are treated as usual;
5246 if 1 - normal activity. */
5247#define XCM_REG_XCM_CFC_IFEN 0x20050
5248/* [RW 14] Interrupt mask register #0 read/write */
5249#define XCM_REG_XCM_INT_MASK 0x202b4
5250/* [R 14] Interrupt register #0 read */
5251#define XCM_REG_XCM_INT_STS 0x202a8
5252/* [RW 30] Parity mask register #0 read/write */
5253#define XCM_REG_XCM_PRTY_MASK 0x202c4
5254/* [R 30] Parity register #0 read */
5255#define XCM_REG_XCM_PRTY_STS 0x202b8
5256/* [RC 30] Parity register #0 read clear */
5257#define XCM_REG_XCM_PRTY_STS_CLR 0x202bc
5258
5259/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS
5260 REG-pair number (e.g. if region 0 is 6 REG-pairs, the value should be 5).
5261 It is used to determine the number of AG context REG-pairs written back
5262 when the Reg1WbFlg isn't set. */
5263#define XCM_REG_XCM_REG0_SZ 0x200f4
5264/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
5265 disregarded; valid is deasserted; all other signals are treated as usual;
5266 if 1 - normal activity. */
5267#define XCM_REG_XCM_STORM0_IFEN 0x20004
5268/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
5269 disregarded; valid is deasserted; all other signals are treated as usual;
5270 if 1 - normal activity. */
5271#define XCM_REG_XCM_STORM1_IFEN 0x20008
5272/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
5273 disregarded; acknowledge output is deasserted; all other signals are
5274 treated as usual; if 1 - normal activity. */
5275#define XCM_REG_XCM_TM_IFEN 0x20020
5276/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
5277 disregarded; valid is deasserted; all other signals are treated as usual;
5278 if 1 - normal activity. */
5279#define XCM_REG_XCM_XQM_IFEN 0x2000c
5280/* [RW 1] If set; the Q index received from the QM is inserted into the event ID. */
5281#define XCM_REG_XCM_XQM_USE_Q 0x200f0
5282/* [RW 4] The value by which CFC updates the activity counter at QM bypass. */
5283#define XCM_REG_XQM_BYP_ACT_UPD 0x200fc
5284/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
5285 the initial credit value; read returns the current value of the credit
5286 counter. Must be initialized to 32 at start-up. */
5287#define XCM_REG_XQM_INIT_CRD 0x20420
5288/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
5289 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
5290 prioritised); 2 stands for weight 2; etc. */
5291#define XCM_REG_XQM_P_WEIGHT 0x200e4
5292/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
5293 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
5294 prioritised); 2 stands for weight 2; etc. */
5295#define XCM_REG_XQM_S_WEIGHT 0x200e8
5296/* [RW 28] The CM header value for QM request (primary). */
5297#define XCM_REG_XQM_XCM_HDR_P 0x200a0
5298/* [RW 28] The CM header value for QM request (secondary). */
5299#define XCM_REG_XQM_XCM_HDR_S 0x200a4
5300/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
5301 acknowledge output is deasserted; all other signals are treated as usual;
5302 if 1 - normal activity. */
5303#define XCM_REG_XQM_XCM_IFEN 0x20014
5304/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
5305 acknowledge output is deasserted; all other signals are treated as usual;
5306 if 1 - normal activity. */
5307#define XCM_REG_XSDM_IFEN 0x20018
5308/* [RC 1] Set at message length mismatch (relative to last indication) at
5309 the SDM interface. */
5310#define XCM_REG_XSDM_LENGTH_MIS 0x20220
5311/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
5312 weight 8 (the most prioritised); 1 stands for weight 1 (least
5313 prioritised); 2 stands for weight 2; etc. */
5314#define XCM_REG_XSDM_WEIGHT 0x200e0
5315/* [RW 17] Indirect access to the descriptor table of the XX protection
5316 mechanism. The fields are: [5:0] - message length; [11:6] - message
5317 pointer; [16:12] - next pointer. */
5318#define XCM_REG_XX_DESCR_TABLE 0x20480
5319#define XCM_REG_XX_DESCR_TABLE_SIZE 32
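/* Illustrative sketch; not part of the original header. Unpacking one
 * 17-bit XX descriptor entry per the field layout documented above: */
static inline void xcm_xx_descr_unpack(u32 entry, u32 *msg_len,
				       u32 *msg_ptr, u32 *next_ptr)
{
	*msg_len  = entry & 0x3f;		/* [5:0] message length */
	*msg_ptr  = (entry >> 6) & 0x3f;	/* [11:6] message pointer */
	*next_ptr = (entry >> 12) & 0x1f;	/* [16:12] next pointer */
}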
5320/* [R 6] Used to read the XX protection Free counter. */
5321#define XCM_REG_XX_FREE 0x20240
5322/* [RW 6] Initial value for the credit counter; responsible for filling
5323 the Input Stage XX protection buffer with the XX protection pending
5324 messages. Max credit available - 3. Write writes the initial credit value;
5325 read returns the current value of the credit counter. Must be initialized
5326 to 2 at start-up. */
5327#define XCM_REG_XX_INIT_CRD 0x20424
5328/* [RW 6] The maximum number of pending messages which may be stored in XX
5329 protection. ~xcm_registers_xx_free.xx_free is read on read. */
5330#define XCM_REG_XX_MSG_NUM 0x20428
5331/* [RW 8] The Event ID sent to the STORM in case of XX overflow. */
5332#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
5333#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0)
5334#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1)
5335#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2)
5336#define XMAC_CTRL_REG_RX_EN (0x1<<1)
5337#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
5338#define XMAC_CTRL_REG_TX_EN (0x1<<0)
5339#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18)
5340#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17)
5341#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0)
5342#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3)
5343#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4)
5344#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN (0x1<<5)
5345#define XMAC_REG_CLEAR_RX_LSS_STATUS 0x60
5346#define XMAC_REG_CTRL 0
5347/* [RW 16] Upper 16 bits of the 48-bit ctrl_sa register. Used as the SA in
5348 * PAUSE/PFC packets transmitted by the MAC */
5349#define XMAC_REG_CTRL_SA_HI 0x2c
5350/* [RW 32] Lower 32 bits of the 48-bit ctrl_sa register. Used as the SA in
5351 * PAUSE/PFC packets transmitted by the MAC */
5352#define XMAC_REG_CTRL_SA_LO 0x28
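/* Illustrative sketch; not part of the original header. Splitting a
 * 48-bit station address (held in the low 48 bits of 'sa') across the
 * two ctrl_sa registers above: */
static inline void xmac_sa_split(u64 sa, u32 *sa_hi, u32 *sa_lo)
{
	*sa_hi = (u32)(sa >> 32) & 0xffff;	/* XMAC_REG_CTRL_SA_HI */
	*sa_lo = (u32)sa;			/* XMAC_REG_CTRL_SA_LO */
}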
5353#define XMAC_REG_PAUSE_CTRL 0x68
5354#define XMAC_REG_PFC_CTRL 0x70
5355#define XMAC_REG_PFC_CTRL_HI 0x74
5356#define XMAC_REG_RX_LSS_STATUS 0x58
5357/* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
5358 * CRC in strip mode */
5359#define XMAC_REG_RX_MAX_SIZE 0x40
5360#define XMAC_REG_TX_CTRL 0x20
5361/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
5362 The fields are: [4:0] - tail pointer; [9:5] - Link List size; [14:10] -
5363 header pointer. */
5364#define XCM_REG_XX_TABLE 0x20500
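/* Illustrative sketch; not part of the original header. Unpacking one
 * XX table entry per the field layout documented above: */
static inline void xcm_xx_table_unpack(u32 entry, u32 *tail, u32 *ll_size,
				       u32 *head)
{
	*tail    = entry & 0x1f;		/* [4:0] tail pointer */
	*ll_size = (entry >> 5) & 0x1f;		/* [9:5] link list size */
	*head    = (entry >> 10) & 0x1f;	/* [14:10] header pointer */
}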
5365/* [RW 8] The event id for aggregated interrupt 0 */
5366#define XSDM_REG_AGG_INT_EVENT_0 0x166038
5367#define XSDM_REG_AGG_INT_EVENT_1 0x16603c
5368#define XSDM_REG_AGG_INT_EVENT_10 0x166060
5369#define XSDM_REG_AGG_INT_EVENT_11 0x166064
5370#define XSDM_REG_AGG_INT_EVENT_12 0x166068
5371#define XSDM_REG_AGG_INT_EVENT_13 0x16606c
5372#define XSDM_REG_AGG_INT_EVENT_14 0x166070
5373#define XSDM_REG_AGG_INT_EVENT_2 0x166040
5374#define XSDM_REG_AGG_INT_EVENT_3 0x166044
5375#define XSDM_REG_AGG_INT_EVENT_4 0x166048
5376#define XSDM_REG_AGG_INT_EVENT_5 0x16604c
5377#define XSDM_REG_AGG_INT_EVENT_6 0x166050
5378#define XSDM_REG_AGG_INT_EVENT_7 0x166054
5379#define XSDM_REG_AGG_INT_EVENT_8 0x166058
5380#define XSDM_REG_AGG_INT_EVENT_9 0x16605c
5381/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
5382 or auto-mask-mode (1) */
5383#define XSDM_REG_AGG_INT_MODE_0 0x1661b8
5384#define XSDM_REG_AGG_INT_MODE_1 0x1661bc
5385/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
5386#define XSDM_REG_CFC_RSP_START_ADDR 0x166008
5387/* [RW 16] The maximum value of the completion counter #0 */
5388#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c
5389/* [RW 16] The maximum value of the completion counter #1 */
5390#define XSDM_REG_CMP_COUNTER_MAX1 0x166020
5391/* [RW 16] The maximum value of the completion counter #2 */
5392#define XSDM_REG_CMP_COUNTER_MAX2 0x166024
5393/* [RW 16] The maximum value of the completion counter #3 */
5394#define XSDM_REG_CMP_COUNTER_MAX3 0x166028
5395/* [RW 13] The start address in the internal RAM for the completion
5396 counters. */
5397#define XSDM_REG_CMP_COUNTER_START_ADDR 0x16600c
5398#define XSDM_REG_ENABLE_IN1 0x166238
5399#define XSDM_REG_ENABLE_IN2 0x16623c
5400#define XSDM_REG_ENABLE_OUT1 0x166240
5401#define XSDM_REG_ENABLE_OUT2 0x166244
5402/* [RW 4] The initial number of messages that can be sent to the pxp control
5403 interface without receiving any ACK. */
5404#define XSDM_REG_INIT_CREDIT_PXP_CTRL 0x1664bc
5405/* [ST 32] The number of ACK after placement messages received */
5406#define XSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x16627c
5407/* [ST 32] The number of packet end messages received from the parser */
5408#define XSDM_REG_NUM_OF_PKT_END_MSG 0x166274
5409/* [ST 32] The number of requests received from the pxp async if */
5410#define XSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x166278
5411/* [ST 32] The number of commands received in queue 0 */
5412#define XSDM_REG_NUM_OF_Q0_CMD 0x166248
5413/* [ST 32] The number of commands received in queue 10 */
5414#define XSDM_REG_NUM_OF_Q10_CMD 0x16626c
5415/* [ST 32] The number of commands received in queue 11 */
5416#define XSDM_REG_NUM_OF_Q11_CMD 0x166270
5417/* [ST 32] The number of commands received in queue 1 */
5418#define XSDM_REG_NUM_OF_Q1_CMD 0x16624c
5419/* [ST 32] The number of commands received in queue 3 */
5420#define XSDM_REG_NUM_OF_Q3_CMD 0x166250
5421/* [ST 32] The number of commands received in queue 4 */
5422#define XSDM_REG_NUM_OF_Q4_CMD 0x166254
5423/* [ST 32] The number of commands received in queue 5 */
5424#define XSDM_REG_NUM_OF_Q5_CMD 0x166258
5425/* [ST 32] The number of commands received in queue 6 */
5426#define XSDM_REG_NUM_OF_Q6_CMD 0x16625c
5427/* [ST 32] The number of commands received in queue 7 */
5428#define XSDM_REG_NUM_OF_Q7_CMD 0x166260
5429/* [ST 32] The number of commands received in queue 8 */
5430#define XSDM_REG_NUM_OF_Q8_CMD 0x166264
5431/* [ST 32] The number of commands received in queue 9 */
5432#define XSDM_REG_NUM_OF_Q9_CMD 0x166268
5433/* [RW 13] The start address in the internal RAM for queue counters */
5434#define XSDM_REG_Q_COUNTER_START_ADDR 0x166010
5435/* [W 17] Generate an operation after completion; bit-16 is
5436 * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and
5437 * bits 4:0 are the T124Param[4:0] */
5438#define XSDM_REG_OPERATION_GEN 0x1664c4
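/* Illustrative sketch; not part of the original header. Packing the
 * OPERATION_GEN fields documented above into one 17-bit command word: */
static inline u32 xsdm_op_gen(u32 t124_param, u32 trig, u32 agg_vect_idx,
			      bool vect_idx_valid)
{
	return (t124_param & 0x1f) |			/* bits 4:0 */
	       ((trig & 0x7) << 5) |			/* bits 7:5 */
	       ((agg_vect_idx & 0xff) << 8) |		/* bits 15:8 */
	       (vect_idx_valid ? (1u << 16) : 0);	/* bit 16 */
}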
5439/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
5440#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548
5441/* [R 1] parser fifo empty in sdm_sync block */
5442#define XSDM_REG_SYNC_PARSER_EMPTY 0x166550
5443/* [R 1] parser serial fifo empty in sdm_sync block */
5444#define XSDM_REG_SYNC_SYNC_EMPTY 0x166558
5445/* [RW 32] Tick for timer counter. Applicable only when
5446 ~xsdm_registers_timer_tick_enable.timer_tick_enable = 1 */
5447#define XSDM_REG_TIMER_TICK 0x166000
5448/* [RW 32] Interrupt mask register #0 read/write */
5449#define XSDM_REG_XSDM_INT_MASK_0 0x16629c
5450#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac
5451/* [R 32] Interrupt register #0 read */
5452#define XSDM_REG_XSDM_INT_STS_0 0x166290
5453#define XSDM_REG_XSDM_INT_STS_1 0x1662a0
5454/* [RW 11] Parity mask register #0 read/write */
5455#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
5456/* [R 11] Parity register #0 read */
5457#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
5458/* [RC 11] Parity register #0 read clear */
5459#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4
5460/* [RW 5] The number of time_slots in the arbitration cycle */
5461#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
5462/* [RW 3] The source that is associated with arbitration element 0. Source
5463 decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0; 3 -
5464 sleeping thread with priority 1; 4 - sleeping thread with priority 2 */
5465#define XSEM_REG_ARB_ELEMENT0 0x280020
5466/* [RW 3] The source that is associated with arbitration element 1. Source
5467 decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0; 3 -
5468 sleeping thread with priority 1; 4 - sleeping thread with priority 2.
5469 Must not be equal to register ~xsem_registers_arb_element0.arb_element0 */
5470#define XSEM_REG_ARB_ELEMENT1 0x280024
5471/* [RW 3] The source that is associated with arbitration element 2. Source
5472 decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0; 3 -
5473 sleeping thread with priority 1; 4 - sleeping thread with priority 2.
5474 Must not be equal to register ~xsem_registers_arb_element0.arb_element0
5475 or ~xsem_registers_arb_element1.arb_element1 */
5476#define XSEM_REG_ARB_ELEMENT2 0x280028
5477/* [RW 3] The source that is associated with arbitration element 3. Source
5478 decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0; 3 -
5479 sleeping thread with priority 1; 4 - sleeping thread with priority 2.
5480 Must not be equal to register ~xsem_registers_arb_element0.arb_element0;
5481 ~xsem_registers_arb_element1.arb_element1 or
5482 ~xsem_registers_arb_element2.arb_element2 */
5483#define XSEM_REG_ARB_ELEMENT3 0x28002c
5484/* [RW 3] The source that is associated with arbitration element 4. Source
5485 decoding is: 0 - fic0; 1 - fic1; 2 - sleeping thread with priority 0; 3 -
5486 sleeping thread with priority 1; 4 - sleeping thread with priority 2.
5487 Must not be equal to register ~xsem_registers_arb_element0.arb_element0;
5488 ~xsem_registers_arb_element1.arb_element1;
5489 ~xsem_registers_arb_element2.arb_element2 or
5490 ~xsem_registers_arb_element3.arb_element3 */
5491#define XSEM_REG_ARB_ELEMENT4 0x280030
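/* Illustrative sketch; not part of the original header. Per the comments
 * above, the arbitration element sources must be pairwise distinct; a
 * hypothetical sanity check before programming them: */
static inline bool xsem_arb_elements_valid(const u32 *el, int n)
{
	int i, j;

	for (i = 0; i < n; i++)
		for (j = i + 1; j < n; j++)
			if (el[i] == el[j])
				return false;
	return true;
}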
5492#define XSEM_REG_ENABLE_IN 0x2800a4
5493#define XSEM_REG_ENABLE_OUT 0x2800a8
5494/* [RW 32] This address space contains all registers and memories that are
5495 placed in SEM_FAST block. The SEM_FAST registers are described in
5496 appendix B. In order to access the sem_fast registers the base address
5497 ~fast_memory.fast_memory should be added to each sem_fast register offset. */
5498#define XSEM_REG_FAST_MEMORY 0x2a0000
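/* Example (illustrative; not part of the original header): a SEM_FAST
 * register at offset 0x100 inside the XSEM fast-memory window is accessed
 * at XSEM_REG_FAST_MEMORY + 0x100 = 0x2a0100. */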
5499/* [RW 1] Disables input messages from FIC0. May be updated during run_time
5500 by the microcode */
5501#define XSEM_REG_FIC0_DISABLE 0x280224
5502/* [RW 1] Disables input messages from FIC1. May be updated during run_time
5503 by the microcode */
5504#define XSEM_REG_FIC1_DISABLE 0x280234
5505/* [RW 15] Interrupt table. Read and write access to it is not possible in
5506 the middle of the work */
5507#define XSEM_REG_INT_TABLE 0x280400
5508/* [ST 24] Statistics register. The number of messages that entered through
5509 FIC0 */
5510#define XSEM_REG_MSG_NUM_FIC0 0x280000
5511/* [ST 24] Statistics register. The number of messages that entered through
5512 FIC1 */
5513#define XSEM_REG_MSG_NUM_FIC1 0x280004
5514/* [ST 24] Statistics register. The number of messages that were sent to
5515 FOC0 */
5516#define XSEM_REG_MSG_NUM_FOC0 0x280008
5517/* [ST 24] Statistics register. The number of messages that were sent to
5518 FOC1 */
5519#define XSEM_REG_MSG_NUM_FOC1 0x28000c
5520/* [ST 24] Statistics register. The number of messages that were sent to
5521 FOC2 */
5522#define XSEM_REG_MSG_NUM_FOC2 0x280010
5523/* [ST 24] Statistics register. The number of messages that were sent to
5524 FOC3 */
5525#define XSEM_REG_MSG_NUM_FOC3 0x280014
5526/* [RW 1] Disables input messages from the passive buffer. May be updated
5527 during run_time by the microcode */
5528#define XSEM_REG_PAS_DISABLE 0x28024c
5529/* [WB 128] Debug only. Passive buffer memory */
5530#define XSEM_REG_PASSIVE_BUFFER 0x282000
5531/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
5532#define XSEM_REG_PRAM 0x2c0000
5533/* [R 16] Valid sleeping threads indication; one bit per thread */
5534#define XSEM_REG_SLEEP_THREADS_VALID 0x28026c
5535/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
5536#define XSEM_REG_SLOW_EXT_STORE_EMPTY 0x2802a0
5537/* [RW 16] List of free threads. There is a bit per thread. */
5538#define XSEM_REG_THREADS_LIST 0x2802e4
5539/* [RW 3] The arbitration scheme of time_slot 0 */
5540#define XSEM_REG_TS_0_AS 0x280038
5541/* [RW 3] The arbitration scheme of time_slot 10 */
5542#define XSEM_REG_TS_10_AS 0x280060
5543/* [RW 3] The arbitration scheme of time_slot 11 */
5544#define XSEM_REG_TS_11_AS 0x280064
5545/* [RW 3] The arbitration scheme of time_slot 12 */
5546#define XSEM_REG_TS_12_AS 0x280068
5547/* [RW 3] The arbitration scheme of time_slot 13 */
5548#define XSEM_REG_TS_13_AS 0x28006c
5549/* [RW 3] The arbitration scheme of time_slot 14 */
5550#define XSEM_REG_TS_14_AS 0x280070
5551/* [RW 3] The arbitration scheme of time_slot 15 */
5552#define XSEM_REG_TS_15_AS 0x280074
5553/* [RW 3] The arbitration scheme of time_slot 16 */
5554#define XSEM_REG_TS_16_AS 0x280078
5555/* [RW 3] The arbitration scheme of time_slot 17 */
5556#define XSEM_REG_TS_17_AS 0x28007c
5557/* [RW 3] The arbitration scheme of time_slot 18 */
5558#define XSEM_REG_TS_18_AS 0x280080
5559/* [RW 3] The arbitration scheme of time_slot 1 */
5560#define XSEM_REG_TS_1_AS 0x28003c
5561/* [RW 3] The arbitration scheme of time_slot 2 */
5562#define XSEM_REG_TS_2_AS 0x280040
5563/* [RW 3] The arbitration scheme of time_slot 3 */
5564#define XSEM_REG_TS_3_AS 0x280044
5565/* [RW 3] The arbitration scheme of time_slot 4 */
5566#define XSEM_REG_TS_4_AS 0x280048
5567/* [RW 3] The arbitration scheme of time_slot 5 */
5568#define XSEM_REG_TS_5_AS 0x28004c
5569/* [RW 3] The arbitration scheme of time_slot 6 */
5570#define XSEM_REG_TS_6_AS 0x280050
5571/* [RW 3] The arbitration scheme of time_slot 7 */
5572#define XSEM_REG_TS_7_AS 0x280054
5573/* [RW 3] The arbitration scheme of time_slot 8 */
5574#define XSEM_REG_TS_8_AS 0x280058
5575/* [RW 3] The arbitration scheme of time_slot 9 */
5576#define XSEM_REG_TS_9_AS 0x28005c
5577/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit
5578 * for the 64 VFs; values 64-67 reset it for the 4 PFs; values 68-127 are not valid. */
5579#define XSEM_REG_VFPF_ERR_NUM 0x280380
5580/* [RW 32] Interrupt mask register #0 read/write */
5581#define XSEM_REG_XSEM_INT_MASK_0 0x280110
5582#define XSEM_REG_XSEM_INT_MASK_1 0x280120
5583/* [R 32] Interrupt register #0 read */
5584#define XSEM_REG_XSEM_INT_STS_0 0x280104
5585#define XSEM_REG_XSEM_INT_STS_1 0x280114
5586/* [RW 32] Parity mask register #0 read/write */
5587#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130
5588#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140
5589/* [R 32] Parity register #0 read */
5590#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
5591#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
5592/* [RC 32] Parity register #0 read clear */
5593#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
5594#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
5595#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
5596#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
5597#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
5598#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0)
5599#define MCPR_NVM_COMMAND_DOIT (1L<<4)
5600#define MCPR_NVM_COMMAND_DONE (1L<<3)
5601#define MCPR_NVM_COMMAND_FIRST (1L<<7)
5602#define MCPR_NVM_COMMAND_LAST (1L<<8)
5603#define MCPR_NVM_COMMAND_WR (1L<<5)
5604#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9)
5605#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5)
5606#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1)
5607#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3)
5608#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
5609#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3)
5610#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3)
5611#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3)
5612#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3)
5613#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3)
5614#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3)
5615#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3)
5616#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3)
5617#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3)
5618#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3)
5619#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
5620#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
5621#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
5622#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3)
5623#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
5624#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3)
5625#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3)
5626#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3)
5627#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3)
5628#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3)
5629#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3)
5630#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3)
5631#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3)
5632#define BIGMAC2_REGISTER_RX_STAT_GRPP (0x51<<3)
5633#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3)
5634#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3)
5635#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3)
5636#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3)
5637#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3)
5638#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3)
5639#define BIGMAC2_REGISTER_TX_STAT_GTPP (0x24<<3)
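/* Note (illustrative; not part of the original header): BigMAC registers
 * are 64 bits wide, so the offsets above are register indices shifted
 * left by 3 to form byte offsets, e.g. BIGMAC_REGISTER_RX_CONTROL is
 * (0x21 << 3) = 0x108. */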
5640#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
5641#define EMAC_LED_100MB_OVERRIDE (1L<<2)
5642#define EMAC_LED_10MB_OVERRIDE (1L<<3)
5643#define EMAC_LED_2500MB_OVERRIDE (1L<<12)
5644#define EMAC_LED_OVERRIDE (1L<<0)
5645#define EMAC_LED_TRAFFIC (1L<<6)
5646#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26)
5647#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26)
5648#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26)
5649#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26)
5650#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26)
5651#define EMAC_MDIO_COMM_DATA (0xffffL<<0)
5652#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
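/* Illustrative sketch; not part of the original header. An MDIO access
 * through EMAC_REG_EMAC_MDIO_COMM is started by setting
 * EMAC_MDIO_COMM_START_BUSY and is complete once hardware clears it, so a
 * caller would poll the register value with something like: */
static inline bool emac_mdio_done(u32 mdio_comm)
{
	return !(mdio_comm & EMAC_MDIO_COMM_START_BUSY);
}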
5653#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
5654#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
5655#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16)
5656#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
5657#define EMAC_MDIO_STATUS_10MB (1L<<1)
5658#define EMAC_MODE_25G_MODE (1L<<5)
5659#define EMAC_MODE_HALF_DUPLEX (1L<<1)
5660#define EMAC_MODE_PORT_GMII (2L<<2)
5661#define EMAC_MODE_PORT_MII (1L<<2)
5662#define EMAC_MODE_PORT_MII_10M (3L<<2)
5663#define EMAC_MODE_RESET (1L<<0)
5664#define EMAC_REG_EMAC_LED 0xc
5665#define EMAC_REG_EMAC_MAC_MATCH 0x10
5666#define EMAC_REG_EMAC_MDIO_COMM 0xac
5667#define EMAC_REG_EMAC_MDIO_MODE 0xb4
5668#define EMAC_REG_EMAC_MDIO_STATUS 0xb0
5669#define EMAC_REG_EMAC_MODE 0x0
5670#define EMAC_REG_EMAC_RX_MODE 0xc8
5671#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c
5672#define EMAC_REG_EMAC_RX_STAT_AC 0x180
5673#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4
5674#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23
5675#define EMAC_REG_EMAC_TX_MODE 0xbc
5676#define EMAC_REG_EMAC_TX_STAT_AC 0x280
5677#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
5678#define EMAC_REG_RX_PFC_MODE 0x320
5679#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2)
5680#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1)
5681#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0)
5682#define EMAC_REG_RX_PFC_PARAM 0x324
5683#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0
5684#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16
5685#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328
5686#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0)
5687#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330
5688#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0)
5689#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c
5690#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0)
5691#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334
5692#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0)
5693#define EMAC_RX_MODE_FLOW_EN (1L<<2)
5694#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
5695#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
5696#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
5697#define EMAC_RX_MODE_RESET (1L<<0)
5698#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
5699#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
5700#define EMAC_TX_MODE_FLOW_EN (1L<<4)
5701#define EMAC_TX_MODE_RESET (1L<<0)
5702#define MISC_REGISTERS_GPIO_0 0
5703#define MISC_REGISTERS_GPIO_1 1
5704#define MISC_REGISTERS_GPIO_2 2
5705#define MISC_REGISTERS_GPIO_3 3
5706#define MISC_REGISTERS_GPIO_CLR_POS 16
5707#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24)
5708#define MISC_REGISTERS_GPIO_FLOAT_POS 24
5709#define MISC_REGISTERS_GPIO_HIGH 1
5710#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2
5711#define MISC_REGISTERS_GPIO_INT_CLR_POS 24
5712#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0
5713#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1
5714#define MISC_REGISTERS_GPIO_INT_SET_POS 16
5715#define MISC_REGISTERS_GPIO_LOW 0
5716#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1
5717#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0
5718#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
5719#define MISC_REGISTERS_GPIO_SET_POS 8
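/* Illustrative sketch; not part of the original header. Driving a GPIO
 * pin high using the positional fields above: clear the pin's float bit
 * and set its set-position bit in the (externally read) GPIO register
 * value. Any per-port pin-number adjustment is outside this sketch. */
static inline u32 misc_gpio_drive_high(u32 gpio_reg, u32 gpio_num)
{
	gpio_reg &= ~(1u << (gpio_num + MISC_REGISTERS_GPIO_FLOAT_POS));
	gpio_reg |= 1u << (gpio_num + MISC_REGISTERS_GPIO_SET_POS);
	return gpio_reg;
}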
5720#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
5721#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
5722#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
5723#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
5724#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
5725#define MISC_REGISTERS_RESET_REG_1_SET 0x584
5726#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
5727#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24)
5728#define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25)
5729#define MISC_REGISTERS_RESET_REG_2_PGLC (0x1<<19)
5730#define MISC_REGISTERS_RESET_REG_2_RST_ATC (0x1<<17)
5731#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
5732#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1)
5733#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2)
5734#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
5735#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3)
5736#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
5737#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
5738#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
5739#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8)
5740#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7)
5741#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
5742#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
5743#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
5744#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO (0x1<<13)
5745#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
5746#define MISC_REGISTERS_RESET_REG_2_SET 0x594
5747#define MISC_REGISTERS_RESET_REG_2_UMAC0 (0x1<<20)
5748#define MISC_REGISTERS_RESET_REG_2_UMAC1 (0x1<<21)
5749#define MISC_REGISTERS_RESET_REG_2_XMAC (0x1<<22)
5750#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT (0x1<<23)
5751#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
5752#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
5753#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2)
5754#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3)
5755#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0)
5756#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5)
5757#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6)
5758#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7)
5759#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4)
5760#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8)
5761#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4
5762#define MISC_REGISTERS_SPIO_4 4
5763#define MISC_REGISTERS_SPIO_5 5
5764#define MISC_REGISTERS_SPIO_7 7
5765#define MISC_REGISTERS_SPIO_CLR_POS 16
5766#define MISC_REGISTERS_SPIO_FLOAT (0xffL<<24)
5767#define MISC_REGISTERS_SPIO_FLOAT_POS 24
5768#define MISC_REGISTERS_SPIO_INPUT_HI_Z 2
5769#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS 16
5770#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
5771#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
5772#define MISC_REGISTERS_SPIO_SET_POS 8
5773#define HW_LOCK_DRV_FLAGS 10
5774#define HW_LOCK_MAX_RESOURCE_VALUE 31
5775#define HW_LOCK_RESOURCE_GPIO 1
5776#define HW_LOCK_RESOURCE_MDIO 0
5777#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
5778#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
5779#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
5780#define HW_LOCK_RESOURCE_SPIO 2
5781#define HW_LOCK_RESOURCE_RESET 5
5782#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
5783#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
5784#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
5785#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
5786#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
5787#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9)
5788#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8)
5789#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7)
5790#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6)
5791#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29)
5792#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28)
5793#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1)
5794#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0)
5795#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18)
5796#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11)
5797#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10)
5798#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13)
5799#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12)
5800#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2)
5801#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12)
5802#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28)
5803#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31)
5804#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29)
5805#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30)
5806#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15)
5807#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14)
5808#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14)
5809#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20)
5810#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1<<31)
5811#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30)
5812#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0)
5813#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2)
5814#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3)
5815#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5)
5816#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4)
5817#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3)
5818#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2)
5819#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3)
5820#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2)
5821#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22)
5822#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15)
5823#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27)
5824#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26)
5825#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5)
5826#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4)
5827#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25)
5828#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24)
5829#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29)
5830#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28)
5831#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23)
5832#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22)
5833#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27)
5834#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26)
5835#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21)
5836#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20)
5837#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25)
5838#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24)
5839#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16)
5840#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9)
5841#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8)
5842#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7)
5843#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6)
5844#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11)
5845#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10)
5846
5847#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (0x1<<5)
5848#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (0x1<<9)
5849
5850#define RESERVED_GENERAL_ATTENTION_BIT_0 0
5851
5852#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0
5853#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000
5854
5855#define RESERVED_GENERAL_ATTENTION_BIT_6 6
5856#define RESERVED_GENERAL_ATTENTION_BIT_7 7
5857#define RESERVED_GENERAL_ATTENTION_BIT_8 8
5858#define RESERVED_GENERAL_ATTENTION_BIT_9 9
5859#define RESERVED_GENERAL_ATTENTION_BIT_10 10
5860#define RESERVED_GENERAL_ATTENTION_BIT_11 11
5861#define RESERVED_GENERAL_ATTENTION_BIT_12 12
5862#define RESERVED_GENERAL_ATTENTION_BIT_13 13
5863#define RESERVED_GENERAL_ATTENTION_BIT_14 14
5864#define RESERVED_GENERAL_ATTENTION_BIT_15 15
5865#define RESERVED_GENERAL_ATTENTION_BIT_16 16
5866#define RESERVED_GENERAL_ATTENTION_BIT_17 17
5867#define RESERVED_GENERAL_ATTENTION_BIT_18 18
5868#define RESERVED_GENERAL_ATTENTION_BIT_19 19
5869#define RESERVED_GENERAL_ATTENTION_BIT_20 20
5870#define RESERVED_GENERAL_ATTENTION_BIT_21 21
5871
5872/* storm asserts attention bits */
5873#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7
5874#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8
5875#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9
5876#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10
5877
5878/* mcp error attention bit */
5879#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11
5880
5881/* E1H NIG status sync attention mapped to group 4-7 */
5882#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12
5883#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13
5884#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14
5885#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15
5886#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16
5887#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17
5888#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18
5889#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19
5890
5891
5892#define LATCHED_ATTN_RBCR 23
5893#define LATCHED_ATTN_RBCT 24
5894#define LATCHED_ATTN_RBCN 25
5895#define LATCHED_ATTN_RBCU 26
5896#define LATCHED_ATTN_RBCP 27
5897#define LATCHED_ATTN_TIMEOUT_GRC 28
5898#define LATCHED_ATTN_RSVD_GRC 29
5899#define LATCHED_ATTN_ROM_PARITY_MCP 30
5900#define LATCHED_ATTN_UM_RX_PARITY_MCP 31
5901#define LATCHED_ATTN_UM_TX_PARITY_MCP 32
5902#define LATCHED_ATTN_SCPAD_PARITY_MCP 33
5903
5904#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32)
5905#define GENERAL_ATTEN_OFFSET(atten_name)\
5906 (1UL << ((94 + atten_name) % 32))
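/* Worked example (illustrative; not part of the original header): general
 * attentions start at AEU input bit 94, so for
 * MCP_FATAL_ASSERT_ATTENTION_BIT (11) the macros above give
 * GENERAL_ATTEN_WORD(11) = (94 + 11) / 32 = 3 and
 * GENERAL_ATTEN_OFFSET(11) = 1UL << ((94 + 11) % 32) = 1UL << 9. */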
5907/*
5908 * This file defines GRC base address for every block.
5909 * This file is included by chipsim, asm microcode and cpp microcode.
5910 * These values are used in Design.xml on the regBase attribute.
5911 * Use the base with the generated offsets of specific registers.
5912 */
5913
5914#define GRCBASE_PXPCS 0x000000
5915#define GRCBASE_PCICONFIG 0x002000
5916#define GRCBASE_PCIREG 0x002400
5917#define GRCBASE_EMAC0 0x008000
5918#define GRCBASE_EMAC1 0x008400
5919#define GRCBASE_DBU 0x008800
5920#define GRCBASE_MISC 0x00A000
5921#define GRCBASE_DBG 0x00C000
5922#define GRCBASE_NIG 0x010000
5923#define GRCBASE_XCM 0x020000
5924#define GRCBASE_PRS 0x040000
5925#define GRCBASE_SRCH 0x040400
5926#define GRCBASE_TSDM 0x042000
5927#define GRCBASE_TCM 0x050000
5928#define GRCBASE_BRB1 0x060000
5929#define GRCBASE_MCP 0x080000
5930#define GRCBASE_UPB 0x0C1000
5931#define GRCBASE_CSDM 0x0C2000
5932#define GRCBASE_USDM 0x0C4000
5933#define GRCBASE_CCM 0x0D0000
5934#define GRCBASE_UCM 0x0E0000
5935#define GRCBASE_CDU 0x101000
5936#define GRCBASE_DMAE 0x102000
5937#define GRCBASE_PXP 0x103000
5938#define GRCBASE_CFC 0x104000
5939#define GRCBASE_HC 0x108000
5940#define GRCBASE_PXP2 0x120000
5941#define GRCBASE_PBF 0x140000
5942#define GRCBASE_UMAC0 0x160000
5943#define GRCBASE_UMAC1 0x160400
5944#define GRCBASE_XPB 0x161000
5945#define GRCBASE_MSTAT0 0x162000
5946#define GRCBASE_MSTAT1 0x162800
5947#define GRCBASE_XMAC0 0x163000
5948#define GRCBASE_XMAC1 0x163800
5949#define GRCBASE_TIMERS 0x164000
5950#define GRCBASE_XSDM 0x166000
5951#define GRCBASE_QM 0x168000
5952#define GRCBASE_DQ 0x170000
5953#define GRCBASE_TSEM 0x180000
5954#define GRCBASE_CSEM 0x200000
5955#define GRCBASE_XSEM 0x280000
5956#define GRCBASE_USEM 0x300000
5957#define GRCBASE_MISC_AEU GRCBASE_MISC
5958
5959
5960/* offset of configuration space in the pci core register */
5961#define PCICFG_OFFSET 0x2000
5962#define PCICFG_VENDOR_ID_OFFSET 0x00
5963#define PCICFG_DEVICE_ID_OFFSET 0x02
5964#define PCICFG_COMMAND_OFFSET 0x04
5965#define PCICFG_COMMAND_IO_SPACE (1<<0)
5966#define PCICFG_COMMAND_MEM_SPACE (1<<1)
5967#define PCICFG_COMMAND_BUS_MASTER (1<<2)
5968#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
5969#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
5970#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
5971#define PCICFG_COMMAND_PERR_ENA (1<<6)
5972#define PCICFG_COMMAND_STEPPING (1<<7)
5973#define PCICFG_COMMAND_SERR_ENA (1<<8)
5974#define PCICFG_COMMAND_FAST_B2B (1<<9)
5975#define PCICFG_COMMAND_INT_DISABLE (1<<10)
5976#define PCICFG_COMMAND_RESERVED (0x1f<<11)
5977#define PCICFG_STATUS_OFFSET 0x06
5978#define PCICFG_REVESION_ID_OFFSET 0x08
5979#define PCICFG_CACHE_LINE_SIZE 0x0c
5980#define PCICFG_LATENCY_TIMER 0x0d
5981#define PCICFG_BAR_1_LOW 0x10
5982#define PCICFG_BAR_1_HIGH 0x14
5983#define PCICFG_BAR_2_LOW 0x18
5984#define PCICFG_BAR_2_HIGH 0x1c
5985#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
5986#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
5987#define PCICFG_INT_LINE 0x3c
5988#define PCICFG_INT_PIN 0x3d
5989#define PCICFG_PM_CAPABILITY 0x48
5990#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
5991#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
5992#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
5993#define PCICFG_PM_CAPABILITY_DSI (1<<21)
5994#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
5995#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
5996#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
5997#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
5998#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
5999#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
6000#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
6001#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
6002#define PCICFG_PM_CSR_OFFSET 0x4c
6003#define PCICFG_PM_CSR_STATE (0x3<<0)
6004#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
6005#define PCICFG_PM_CSR_PME_STATUS (1<<15)
6006#define PCICFG_MSI_CAP_ID_OFFSET 0x58
6007#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16)
6008#define PCICFG_MSI_CONTROL_MCAP (0x7<<17)
6009#define PCICFG_MSI_CONTROL_MENA (0x7<<20)
6010#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23)
6011#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24)
6012#define PCICFG_GRC_ADDRESS 0x78
6013#define PCICFG_GRC_DATA 0x80
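/* Illustrative sketch; not part of the original header. GRC space can be
 * reached indirectly through PCI config space: write the target GRC
 * address to PCICFG_GRC_ADDRESS, then access PCICFG_GRC_DATA. Assumes
 * linux/pci.h and that pdev is this function's struct pci_dev. */
static inline u32 grc_indirect_rd(struct pci_dev *pdev, u32 grc_addr)
{
	u32 val;

	pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS, grc_addr);
	pci_read_config_dword(pdev, PCICFG_GRC_DATA, &val);
	return val;
}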
6014#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0
6015#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16)
6016#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27)
6017#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30)
6018#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31)
6019
6020#define PCICFG_DEVICE_CONTROL 0xb4
6021#define PCICFG_DEVICE_STATUS 0xb6
6022#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0)
6023#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1)
6024#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2)
6025#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3)
6026#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4)
6027#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5)
6028#define PCICFG_LINK_CONTROL 0xbc
6029
6030
6031#define BAR_USTRORM_INTMEM 0x400000
6032#define BAR_CSTRORM_INTMEM 0x410000
6033#define BAR_XSTRORM_INTMEM 0x420000
6034#define BAR_TSTRORM_INTMEM 0x430000
6035
6036/* for accessing the IGU in case of status block ACK */
6037#define BAR_IGU_INTMEM 0x440000
6038
6039#define BAR_DOORBELL_OFFSET 0x800000
6040
6041#define BAR_ME_REGISTER 0x450000
6042
6043/* config_2 offset */
6044#define GRC_CONFIG_2_SIZE_REG 0x408
6045#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
6046#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
6047#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
6048#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
6049#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0)
6050#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0)
6051#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0)
6052#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0)
6053#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0)
6054#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0)
6055#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0)
6056#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0)
6057#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0)
6058#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0)
6059#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
6060#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
6061#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
6062#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
6063#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
6064#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
6065#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
6066#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
6067#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
6068#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
6069#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
6070#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8)
6071#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8)
6072#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8)
6073#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8)
6074#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8)
6075#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8)
6076#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8)
6077#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8)
6078#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8)
6079#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8)
6080#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
6081#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
6082#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
6083#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
6084#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
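/* Illustrative sketch; not part of the original header. The BAR1 size
 * field encodes 0 as disabled and n (1..15) as 64K << (n - 1), i.e.
 * 1 -> 64K up to 15 -> 1G: */
static inline u64 grc_bar1_size_bytes(u32 config_2)
{
	u32 n = config_2 & PCI_CONFIG_2_BAR1_SIZE;

	return n ? (65536ULL << (n - 1)) : 0;
}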
6085
6086/* config_3 offset */
6087#define GRC_CONFIG_3_SIZE_REG 0x40c
6088#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
6089#define PCI_CONFIG_3_FORCE_PME (1L<<24)
6090#define PCI_CONFIG_3_PME_STATUS (1L<<25)
6091#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
6092#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
6093#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
6094#define PCI_CONFIG_3_PCI_POWER (1L<<31)
6095
6096#define GRC_BAR2_CONFIG 0x4e0
6097#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
6098#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
6099#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
6100#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
6101#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
6102#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
6103#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
6104#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
6105#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
6106#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
6107#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
6108#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
6109#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
6110#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
6111#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
6112#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
6113#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
6114#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
6115
6116#define PCI_PM_DATA_A 0x410
6117#define PCI_PM_DATA_B 0x414
6118#define PCI_ID_VAL1 0x434
6119#define PCI_ID_VAL2 0x438
6120
6121#define PXPCS_TL_CONTROL_5 0x814
6122#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
6123#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/
6124#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/
6125#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/
6126#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/
6127#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/
6128#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/
6129#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/
6130#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/
6131#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/
6132#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/
6133#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/
6134#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/
6135#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/
6136#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/
6137#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/
6138#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/
6139#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/
6140#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/
6141#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/
6142#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/
6143#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/
6144#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/
6145#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/
6146#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/
6147#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/
6148#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/
6149#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/
6150#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/
6151#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/
6152
6153
6154#define PXPCS_TL_FUNC345_STAT 0x854
6155#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */
6156#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\
6157 (1 << 28) /* Unsupported Request Error Status in function 4, if \
6158 set, generate pcie_err_attn output when this error is seen. WC */
6159#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\
6160 (1 << 27) /* ECRC Error TLP Status in function 4, if set, \
6161 generate pcie_err_attn output when this error is seen. WC */
6162#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\
6163 (1 << 26) /* Malformed TLP Status in function 4, if set, \
6164 generate pcie_err_attn output when this error is seen. WC */
6165#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\
6166 (1 << 25) /* Receiver Overflow Status in function 4, if \
6167 set, generate pcie_err_attn output when this error is seen. WC \
6168 */
6169#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\
6170 (1 << 24) /* Unexpected Completion Status in function 4, \
6171 if set, generate pcie_err_attn output when this error is seen. WC \
6172 */
6173#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\
6174 (1 << 23) /* Receive UR Status in function 4. If set, generate \
6175 pcie_err_attn output when this error is seen. WC */
6176#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\
6177 (1 << 22) /* Completer Timeout Status in function 4, if \
6178 set, generate pcie_err_attn output when this error is seen. WC */
6179#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\
6180 (1 << 21) /* Flow Control Protocol Error Status in \
6181 function 4, if set, generate pcie_err_attn output when this error \
6182 is seen. WC */
6183#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\
6184 (1 << 20) /* Poisoned Error Status in function 4, if set, \
6185 generate pcie_err_attn output when this error is seen. WC */
6186#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */
6187#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\
6188 (1 << 18) /* Unsupported Request Error Status in function 3, if \
6189 set, generate pcie_err_attn output when this error is seen. WC */
6190#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\
6191 (1 << 17) /* ECRC Error TLP Status in function 3, if set, \
6192 generate pcie_err_attn output when this error is seen. WC */
6193#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\
6194 (1 << 16) /* Malformed TLP Status in function 3, if set, \
6195 generate pcie_err_attn output when this error is seen. WC */
6196#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\
6197 (1 << 15) /* Receiver Overflow Status in function 3, if \
6198 set, generate pcie_err_attn output when this error is seen. WC \
6199 */
6200#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\
6201 (1 << 14) /* Unexpected Completion Status in function 3, \
6202 if set, generate pcie_err_attn output when this error is seen. WC \
6203 */
6204#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\
6205 (1 << 13) /* Receive UR Status in function 3. If set, generate \
6206 pcie_err_attn output when this error is seen. WC */
6207#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\
6208 (1 << 12) /* Completer Timeout Status in function 3, if \
6209 set, generate pcie_err_attn output when this error is seen. WC */
6210#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\
6211 (1 << 11) /* Flow Control Protocol Error Status in \
6212 function 3, if set, generate pcie_err_attn output when this error \
6213 is seen. WC */
6214#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\
6215 (1 << 10) /* Poisoned Error Status in function 3, if set, \
6216 generate pcie_err_attn output when this error is seen. WC */
6217#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */
6218#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\
6219 (1 << 8) /* Unsupported Request Error Status for Function 2, if \
6220 set, generate pcie_err_attn output when this error is seen. WC */
6221#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\
6222 (1 << 7) /* ECRC Error TLP Status for Function 2, if set, \
6223 generate pcie_err_attn output when this error is seen. WC */
6224#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\
6225 (1 << 6) /* Malformed TLP Status for Function 2, if set, \
6226 generate pcie_err_attn output when this error is seen. WC */
6227#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\
6228 (1 << 5) /* Receiver Overflow Status for Function 2, if \
6229 set, generate pcie_err_attn output when this error is seen. WC \
6230 */
6231#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\
6232 (1 << 4) /* Unexpected Completion Status for Function 2, \
6233 if set, generate pcie_err_attn output when this error is seen. WC \
6234 */
6235#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\
6236 (1 << 3) /* Receive UR Status for Function 2. If set, generate \
6237 pcie_err_attn output when this error is seen. WC */
6238#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\
6239 (1 << 2) /* Completer Timeout Status for Function 2, if \
6240 set, generate pcie_err_attn output when this error is seen. WC */
6241#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\
6242 (1 << 1) /* Flow Control Protocol Error Status for \
6243 Function 2, if set, generate pcie_err_attn output when this error \
6244 is seen. WC */
6245#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\
6246 (1 << 0) /* Poisoned Error Status for Function 2, if set, \
6247 generate pcie_err_attn output when this error is seen. WC */
6248
6249
6250#define PXPCS_TL_FUNC678_STAT 0x85C
6251#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */
6252#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\
6253 (1 << 28) /* Unsupported Request Error Status in function 7, if \
6254 set, generate pcie_err_attn output when this error is seen. WC */
6255#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\
6256 (1 << 27) /* ECRC Error TLP Status in function 7, if set, \
6257 generate pcie_err_attn output when this error is seen. WC */
6258#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\
6259 (1 << 26) /* Malformed TLP Status in function 7, if set, \
6260 generate pcie_err_attn output when this error is seen. WC */
6261#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\
6262 (1 << 25) /* Receiver Overflow Status in function 7, if \
6263 set, generate pcie_err_attn output when this error is seen. WC \
6264 */
6265#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\
6266 (1 << 24) /* Unexpected Completion Status in function 7, \
6267 if set, generate pcie_err_attn output when this error is seen. WC \
6268 */
6269#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\
6270 (1 << 23) /* Receive UR Status in function 7. If set, generate \
6271 pcie_err_attn output when this error is seen. WC */
6272#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\
6273 (1 << 22) /* Completer Timeout Status in function 7, if \
6274 set, generate pcie_err_attn output when this error is seen. WC */
6275#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\
6276 (1 << 21) /* Flow Control Protocol Error Status in \
6277 function 7, if set, generate pcie_err_attn output when this error \
6278 is seen. WC */
6279#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\
6280 (1 << 20) /* Poisoned Error Status in function 7, if set, \
6281 generate pcie_err_attn output when this error is seen. WC */
6282#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */
6283#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\
6284 (1 << 18) /* Unsupported Request Error Status in function 6, if \
6285 set, generate pcie_err_attn output when this error is seen. WC */
6286#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\
6287 (1 << 17) /* ECRC Error TLP Status in function 6, if set, \
6288 generate pcie_err_attn output when this error is seen. WC */
6289#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\
6290 (1 << 16) /* Malformed TLP Status in function 6, if set, \
6291 generate pcie_err_attn output when this error is seen. WC */
6292#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\
6293 (1 << 15) /* Receiver Overflow Status in function 6, if \
6294 set, generate pcie_err_attn output when this error is seen. WC \
6295 */
6296#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\
6297 (1 << 14) /* Unexpected Completion Status in function 6, \
6298 if set, generate pcie_err_attn output when this error is seen. WC \
6299 */
6300#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\
6301 (1 << 13) /* Receive UR Status in function 6. If set, generate \
6302 pcie_err_attn output when this error is seen. WC */
6303#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\
6304 (1 << 12) /* Completer Timeout Status in function 6, if \
6305 set, generate pcie_err_attn output when this error is seen. WC */
6306#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\
6307 (1 << 11) /* Flow Control Protocol Error Status in \
6308 function 6, if set, generate pcie_err_attn output when this error \
6309 is seen. WC */
6310#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\
6311 (1 << 10) /* Poisoned Error Status in function 6, if set, \
6312 generate pcie_err_attn output when this error is seen. WC */
6313#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */
6314#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\
6315 (1 << 8) /* Unsupported Request Error Status for Function 5, if \
6316 set, generate pcie_err_attn output when this error is seen. WC */
6317#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\
6318 (1 << 7) /* ECRC Error TLP Status for Function 5, if set, \
6319 generate pcie_err_attn output when this error is seen. WC */
6320#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\
6321 (1 << 6) /* Malformed TLP Status for Function 5, if set, \
6322 generate pcie_err_attn output when this error is seen. WC */
6323#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\
6324 (1 << 5) /* Receiver Overflow Status for Function 5, if \
6325 set, generate pcie_err_attn output when this error is seen. WC \
6326 */
6327#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\
6328 (1 << 4) /* Unexpected Completion Status for Function 5, \
6329 if set, generate pcie_err_attn output when this error is seen. WC \
6330 */
6331#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\
6332 (1 << 3) /* Receive UR Status for Function 5. If set, generate \
6333 pcie_err_attn output when this error is seen. WC */
6334#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\
6335 (1 << 2) /* Completer Timeout Status for Function 5, if \
6336 set, generate pcie_err_attn output when this error is seen. WC */
6337#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\
6338 (1 << 1) /* Flow Control Protocol Error Status for \
6339 Function 5, if set, generate pcie_err_attn output when this error \
6340 is seen. WC */
6341#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\
6342 (1 << 0) /* Poisoned Error Status for Function 5, if set, \
6343 generate pcie_err_attn output when this error is seen. WC */
6344
6345
6356#define BAR_ME_REGISTER 0x450000
6357#define ME_REG_PF_NUM_SHIFT 0
6358#define ME_REG_PF_NUM\
6359 (7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
6360#define ME_REG_VF_VALID (1<<8)
6361#define ME_REG_VF_NUM_SHIFT 9
6362#define ME_REG_VF_NUM_MASK (0x3f<<ME_REG_VF_NUM_SHIFT)
6363#define ME_REG_VF_ERR (0x1<<3)
6364#define ME_REG_ABS_PF_NUM_SHIFT 16
6365#define ME_REG_ABS_PF_NUM\
6366 (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
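For illustration only, a minimal sketch (not part of the driver) of decoding the ME register fields defined above; the helper names are hypothetical:

static inline u32 me_reg_rel_pf_num(u32 me)	/* hypothetical helper */
{
	/* Relative PF number lives in bits [2:0] */
	return (me & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT;
}

static inline bool me_reg_is_vf(u32 me)		/* hypothetical helper */
{
	return (me & ME_REG_VF_VALID) != 0;
}

static inline u32 me_reg_vf_num(u32 me)		/* hypothetical helper */
{
	/* VF number lives in bits [14:9] */
	return (me & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
}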
6367
6368
6369#define MDIO_REG_BANK_CL73_IEEEB0 0x0
6370#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
6371#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
6372#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
6373#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
6374
6375#define MDIO_REG_BANK_CL73_IEEEB1 0x10
6376#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
6377#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
6378#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
6379#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
6380#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
6381#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
6382#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
6383#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
6384#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
6385#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
6386#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
6387#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
6388#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
6389#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
6390#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
6391
6392#define MDIO_REG_BANK_RX0 0x80b0
6393#define MDIO_RX0_RX_STATUS 0x10
6394#define MDIO_RX0_RX_STATUS_SIGDET 0x8000
6395#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000
6396#define MDIO_RX0_RX_EQ_BOOST 0x1c
6397#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
6398#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10
6399
6400#define MDIO_REG_BANK_RX1 0x80c0
6401#define MDIO_RX1_RX_EQ_BOOST 0x1c
6402#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
6403#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10
6404
6405#define MDIO_REG_BANK_RX2 0x80d0
6406#define MDIO_RX2_RX_EQ_BOOST 0x1c
6407#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
6408#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10
6409
6410#define MDIO_REG_BANK_RX3 0x80e0
6411#define MDIO_RX3_RX_EQ_BOOST 0x1c
6412#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
6413#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10
6414
6415#define MDIO_REG_BANK_RX_ALL 0x80f0
6416#define MDIO_RX_ALL_RX_EQ_BOOST 0x1c
6417#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
6418#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
6419
6420#define MDIO_REG_BANK_TX0 0x8060
6421#define MDIO_TX0_TX_DRIVER 0x17
6422#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
6423#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
6424#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
6425#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
6426#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
6427#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
6428#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
6429#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
6430#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
6431
6432#define MDIO_REG_BANK_TX1 0x8070
6433#define MDIO_TX1_TX_DRIVER 0x17
6434#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
6435#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
6436#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
6437#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
6438#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
6439#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
6440#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
6441#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
6442#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
6443
6444#define MDIO_REG_BANK_TX2 0x8080
6445#define MDIO_TX2_TX_DRIVER 0x17
6446#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
6447#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
6448#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
6449#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
6450#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
6451#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
6452#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
6453#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
6454#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
6455
6456#define MDIO_REG_BANK_TX3 0x8090
6457#define MDIO_TX3_TX_DRIVER 0x17
6458#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
6459#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
6460#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
6461#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
6462#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
6463#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
6464#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
6465#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
6466#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
6467
6468#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000
6469#define MDIO_BLOCK0_XGXS_CONTROL 0x10
6470
6471#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010
6472#define MDIO_BLOCK1_LANE_CTRL0 0x15
6473#define MDIO_BLOCK1_LANE_CTRL1 0x16
6474#define MDIO_BLOCK1_LANE_CTRL2 0x17
6475#define MDIO_BLOCK1_LANE_PRBS 0x19
6476
6477#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100
6478#define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10
6479#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000
6480#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
6481#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
6482#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
6483#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
6484#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
6485#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
6486#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
6487
6488#define MDIO_REG_BANK_GP_STATUS 0x8120
6489#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B
6490#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
6491#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
6492#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
6493#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
6494#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
6495#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
6496#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
6497#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
6498#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
6499#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
6500#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
6501#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
6502#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
6503#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
6504#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
6505#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
6506#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
6507#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
6508#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
6509#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
6510#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
6511#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
6512#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
6513#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
6514#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00
6515#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00
6516#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00
6517#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00
6518
6519
6520#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
6521#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
6522#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000
6523#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
6524#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
6525#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
6526#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1)
6527
6528#define MDIO_REG_BANK_SERDES_DIGITAL 0x8300
6529#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10
6530#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001
6531#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002
6532#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004
6533#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008
6534#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010
6535#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020
6536#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11
6537#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
6538#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
6539#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
6540#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001
6541#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002
6542#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
6543#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
6544#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
6545#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018
6546#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
6547#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
6548#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
6549#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15
6550#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002
6551#define MDIO_SERDES_DIGITAL_MISC1 0x18
6552#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
6553#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
6554#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000
6555#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000
6556#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000
6557#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000
6558#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010
6559#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f
6560#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000
6561#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001
6562#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002
6563#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003
6564#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004
6565#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005
6566#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006
6567#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007
6568#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008
6569#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009
6570
6571#define MDIO_REG_BANK_OVER_1G 0x8320
6572#define MDIO_OVER_1G_DIGCTL_3_4 0x14
6573#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0
6574#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5
6575#define MDIO_OVER_1G_UP1 0x19
6576#define MDIO_OVER_1G_UP1_2_5G 0x0001
6577#define MDIO_OVER_1G_UP1_5G 0x0002
6578#define MDIO_OVER_1G_UP1_6G 0x0004
6579#define MDIO_OVER_1G_UP1_10G 0x0010
6580#define MDIO_OVER_1G_UP1_10GH 0x0008
6581#define MDIO_OVER_1G_UP1_12G 0x0020
6582#define MDIO_OVER_1G_UP1_12_5G 0x0040
6583#define MDIO_OVER_1G_UP1_13G 0x0080
6584#define MDIO_OVER_1G_UP1_15G 0x0100
6585#define MDIO_OVER_1G_UP1_16G 0x0200
6586#define MDIO_OVER_1G_UP2 0x1A
6587#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007
6588#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038
6589#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0
6590#define MDIO_OVER_1G_UP3 0x1B
6591#define MDIO_OVER_1G_UP3_HIGIG2 0x0001
6592#define MDIO_OVER_1G_LP_UP1 0x1C
6593#define MDIO_OVER_1G_LP_UP2 0x1D
6594#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff
6595#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780
6596#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7
6597#define MDIO_OVER_1G_LP_UP3 0x1E
6598
6599#define MDIO_REG_BANK_REMOTE_PHY 0x8330
6600#define MDIO_REMOTE_PHY_MISC_RX_STATUS 0x10
6601#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG 0x0010
6602#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG 0x0600
6603
6604#define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350
6605#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10
6606#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001
6607#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002
6608
6609#define MDIO_REG_BANK_CL73_USERB0 0x8370
6610#define MDIO_CL73_USERB0_CL73_UCTRL 0x10
6611#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL 0x0002
6612#define MDIO_CL73_USERB0_CL73_USTAT1 0x11
6613#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK 0x0100
6614#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37 0x0400
6615#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12
6616#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000
6617#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000
6618#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000
6619#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14
6620#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001
6621
6622#define MDIO_REG_BANK_AER_BLOCK 0xFFD0
6623#define MDIO_AER_BLOCK_AER_REG 0x1E
6624
6625#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0
6626#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10
6627#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040
6628#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000
6629#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000
6630#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040
6631#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100
6632#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200
6633#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000
6634#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000
6635#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000
6636#define MDIO_COMBO_IEEE0_MII_STATUS 0x11
6637#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004
6638#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020
6639#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14
6640#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020
6641#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040
6642#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180
6643#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000
6644#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080
6645#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100
6646#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180
6647#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000
6648#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15
6649#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000
6650#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000
6651#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180
6652#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000
6653#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180
6654#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040
6655#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020
6656/* When the link partner is in SGMII mode (bit 0 = 1), then
6657bit 15 = link, bit 12 = duplex, bits 11:10 = speed, bit 14 = acknowledge.
6658The other bits are reserved and should be zero */
6659#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
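As a minimal sketch of the bit layout described in the comment above (hypothetical helper, not part of the driver):

static inline void sgmii_parse_lp_ability(u16 ability)	/* hypothetical */
{
	bool link   = ability & 0x8000;		/* bit 15: link */
	bool ack    = ability & 0x4000;		/* bit 14: acknowledge */
	bool duplex = ability & 0x1000;		/* bit 12: duplex */
	u8 speed    = (ability >> 10) & 0x3;	/* bits 11:10: speed */

	(void)link; (void)ack; (void)duplex; (void)speed;
}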
6660
6661
6662#define MDIO_PMA_DEVAD 0x1
6663/* ieee */
6664#define MDIO_PMA_REG_CTRL 0x0
6665#define MDIO_PMA_REG_STATUS 0x1
6666#define MDIO_PMA_REG_10G_CTRL2 0x7
6667#define MDIO_PMA_REG_TX_DISABLE 0x0009
6668#define MDIO_PMA_REG_RX_SD 0xa
6669/* bcm */
6670#define MDIO_PMA_REG_BCM_CTRL 0x0096
6671#define MDIO_PMA_REG_FEC_CTRL 0x00ab
6672#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800
6673#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808
6674#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809
6675#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02
6676#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09
6677#define MDIO_PMA_REG_MISC_CTRL 0xca0a
6678#define MDIO_PMA_REG_GEN_CTRL 0xca10
6679#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
6680#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
6681#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
6682#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
6683#define MDIO_PMA_REG_ROM_VER1 0xca19
6684#define MDIO_PMA_REG_ROM_VER2 0xca1a
6685#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
6686#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d
6687#define MDIO_PMA_REG_PLL_CTRL 0xca1e
6688#define MDIO_PMA_REG_MISC_CTRL0 0xca23
6689#define MDIO_PMA_REG_LRM_MODE 0xca3f
6690#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46
6691#define MDIO_PMA_REG_MISC_CTRL1 0xca85
6692
6693#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000
6694#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c
6695#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000
6696#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004
6697#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008
6698#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c
6699#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002
6700#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003
6701#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820
6702#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
6703#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01
6704#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05
6705
6706#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
6707#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
6708#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
6709#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
6710#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
6711#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
6712#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
6713#define MDIO_PMA_REG_8727_PCS_GP 0xc842
6714#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
6715
6716#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
6717
6718#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
6719#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
6720#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
6721#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08
6722
6723#define MDIO_PMA_REG_7101_RESET 0xc000
6724#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
6725#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
6726#define MDIO_PMA_REG_7101_VER1 0xc026
6727#define MDIO_PMA_REG_7101_VER2 0xc027
6728
6729#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
6730#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
6731#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
6732#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
6733#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
6734#define MDIO_PMA_REG_8481_LED5_MASK 0xa838
6735#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
6736#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
6737#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
6738#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
6739
6740
6741#define MDIO_WIS_DEVAD 0x2
6742/* bcm */
6743#define MDIO_WIS_REG_LASI_CNTL 0x9002
6744#define MDIO_WIS_REG_LASI_STATUS 0x9005
6745
6746#define MDIO_PCS_DEVAD 0x3
6747#define MDIO_PCS_REG_STATUS 0x0020
6748#define MDIO_PCS_REG_LASI_STATUS 0x9005
6749#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000
6750#define MDIO_PCS_REG_7101_SPI_MUX 0xD008
6751#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
6752#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
6753#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
6754#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
6755#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7)
6756#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
6757#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
6758
6759
6760#define MDIO_XS_DEVAD 0x4
6761#define MDIO_XS_PLL_SEQUENCER 0x8000
6762#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a
6763
6764#define MDIO_XS_8706_REG_BANK_RX0 0x80bc
6765#define MDIO_XS_8706_REG_BANK_RX1 0x80cc
6766#define MDIO_XS_8706_REG_BANK_RX2 0x80dc
6767#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
6768#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
6769
6770#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA
6771
6772#define MDIO_AN_DEVAD 0x7
6773/* ieee */
6774#define MDIO_AN_REG_CTRL 0x0000
6775#define MDIO_AN_REG_STATUS 0x0001
6776#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020
6777#define MDIO_AN_REG_ADV_PAUSE 0x0010
6778#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400
6779#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800
6780#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00
6781#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
6782#define MDIO_AN_REG_ADV 0x0011
6783#define MDIO_AN_REG_ADV2 0x0012
6784#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
6785#define MDIO_AN_REG_MASTER_STATUS 0x0021
6786/* bcm */
6787#define MDIO_AN_REG_LINK_STATUS 0x8304
6788#define MDIO_AN_REG_CL37_CL73 0x8370
6789#define MDIO_AN_REG_CL37_AN 0xffe0
6790#define MDIO_AN_REG_CL37_FC_LD 0xffe4
6791#define MDIO_AN_REG_CL37_FC_LP 0xffe5
6792
6793#define MDIO_AN_REG_8073_2_5G 0x8329
6794#define MDIO_AN_REG_8073_BAM 0x8350
6795
6796#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
6797#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
6798#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
6799#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
6800#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
6801#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
6802#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
6803#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
6804#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
6805#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
6806
6807/* BCM84823 only */
6808#define MDIO_CTL_DEVAD 0x1e
6809#define MDIO_CTL_REG_84823_MEDIA 0x401a
6810#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
6811 /* These pins configure the BCM84823 interface to MAC after reset. */
6812#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
6813#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
6814 /* These pins configure the BCM84823 interface to Line after reset. */
6815#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
6816#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
6817#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
6818 /* When this pin is active high during reset, the 10GBASE-T core is
6819 * powered down; when it is active low, the 10GBASE-T core is powered up
6820 */
6821#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
6822#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
6823#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
6824#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
6825#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
6826#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
6827#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
6828
6829#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
6830#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
6831
6832/* BCM84833 only */
6833#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
6834#define MDIO_84833_SUPER_ISOLATE 0x8000
6835/* These are the mailbox registers used by the 84833. */
6836#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005
6837#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006
6838#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
6839#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
6840#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
6841#define MDIO_84833_TOP_CFG_DATA3_REG 0x4011
6842#define MDIO_84833_TOP_CFG_DATA4_REG 0x4012
6843
6844/* Mailbox command set used by 84833. */
6845#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE 0x2
6846/* Mailbox status set used by 84833. */
6847#define PHY84833_CMD_RECEIVED 0x0001
6848#define PHY84833_CMD_IN_PROGRESS 0x0002
6849#define PHY84833_CMD_COMPLETE_PASS 0x0004
6850#define PHY84833_CMD_COMPLETE_ERROR 0x0008
6851#define PHY84833_CMD_OPEN_FOR_CMDS 0x0010
6852#define PHY84833_CMD_SYSTEM_BOOT 0x0020
6853#define PHY84833_CMD_NOT_OPEN_FOR_CMDS 0x0040
6854#define PHY84833_CMD_CLEAR_COMPLETE 0x0080
6855#define PHY84833_CMD_OPEN_OVERRIDE 0xa5a5
6856
6857
6858/* 84833 F/W Feature Commands */
6859#define PHY84833_DIAG_CMD_GET_EEE_MODE 0x27
6860#define PHY84833_DIAG_CMD_SET_EEE_MODE 0x28
6861
6862/* Warpcore clause 45 addressing */
6863#define MDIO_WC_DEVAD 0x3
6864#define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0
6865#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
6866#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
6867#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
6868#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
6869#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
6870#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
6871#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96
6872#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
6873#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
6874#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010
6875#define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015
6876#define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016
6877#define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017
6878#define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061
6879#define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071
6880#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081
6881#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091
6882#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067
6883#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04
6884#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0
6885#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08
6886#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
6887#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c
6888#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000
6889#define MDIO_WC_REG_TX1_TX_DRIVER 0x8077
6890#define MDIO_WC_REG_TX2_TX_DRIVER 0x8087
6891#define MDIO_WC_REG_TX3_TX_DRIVER 0x8097
6892#define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9
6893#define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9
6894#define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba
6895#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
6896#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
6897#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
6898#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
6899#define MDIO_WC_REG_XGXS_STATUS3 0x8129
6900#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
6901#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131
6902#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141
6903#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B
6904#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169
6905#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0
6906#define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1
6907#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2
6908#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3
6909#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4
6910#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE
6911#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0
6912#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2
6913#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0
6914#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0
6915#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1
6916#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2
6917#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3
6918#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4
6919#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4
6920#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8
6921#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc
6922#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE
6923#define MDIO_WC_REG_DSC_SMC 0x8213
6924#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e
6925#define MDIO_WC_REG_TX_FIR_TAP 0x82e2
6926#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00
6927#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f
6928#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04
6929#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0
6930#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a
6931#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00
6932#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000
6933#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3
6934#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6
6935#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7
6936#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8
6937#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec
6938#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300
6939#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301
6940#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302
6941#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304
6942#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308
6943#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309
6944#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
6945#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
6946#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
6947#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
6948#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
6949#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
6950#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
6951#define MDIO_WC_REG_TX66_CONTROL 0x83b0
6952#define MDIO_WC_REG_RX66_CONTROL 0x83c0
6953#define MDIO_WC_REG_RX66_SCW0 0x83c2
6954#define MDIO_WC_REG_RX66_SCW1 0x83c3
6955#define MDIO_WC_REG_RX66_SCW2 0x83c4
6956#define MDIO_WC_REG_RX66_SCW3 0x83c5
6957#define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6
6958#define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7
6959#define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8
6960#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9
6961#define MDIO_WC_REG_FX100_CTRL1 0x8400
6962#define MDIO_WC_REG_FX100_CTRL3 0x8402
6963
6964#define MDIO_WC_REG_MICROBLK_CMD 0xffc2
6965#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5
6966#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc
6967
6968#define MDIO_WC_REG_AERBLK_AER 0xffde
6969#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0
6970#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1
6971
6972#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A
6973#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0
6974#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4
6975
6976#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141
6977
6978#define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f
6979
6980/* 54618se */
6981#define MDIO_REG_GPHY_PHYID_LSB 0x3
6982#define MDIO_REG_GPHY_ID_54618SE 0x5cd5
6983#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
6984#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
6985#define MDIO_REG_GPHY_EEE_ADV 0x3c
6986#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
6987#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
6988#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
6989#define MDIO_REG_INTR_STATUS 0x1a
6990#define MDIO_REG_INTR_MASK 0x1b
6991#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
6992#define MDIO_REG_GPHY_SHADOW 0x1c
6993#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
6994#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
6995#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
6996#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8)
6997
6998#define IGU_FUNC_BASE 0x0400
6999
7000#define IGU_ADDR_MSIX 0x0000
7001#define IGU_ADDR_INT_ACK 0x0200
7002#define IGU_ADDR_PROD_UPD 0x0201
7003#define IGU_ADDR_ATTN_BITS_UPD 0x0202
7004#define IGU_ADDR_ATTN_BITS_SET 0x0203
7005#define IGU_ADDR_ATTN_BITS_CLR 0x0204
7006#define IGU_ADDR_COALESCE_NOW 0x0205
7007#define IGU_ADDR_SIMD_MASK 0x0206
7008#define IGU_ADDR_SIMD_NOMASK 0x0207
7009#define IGU_ADDR_MSI_CTL 0x0210
7010#define IGU_ADDR_MSI_ADDR_LO 0x0211
7011#define IGU_ADDR_MSI_ADDR_HI 0x0212
7012#define IGU_ADDR_MSI_DATA 0x0213
7013
7014#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0
7015#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1
7016#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2
7017#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup 3
7018
7019#define COMMAND_REG_INT_ACK 0x0
7020#define COMMAND_REG_PROD_UPD 0x4
7021#define COMMAND_REG_ATTN_BITS_UPD 0x8
7022#define COMMAND_REG_ATTN_BITS_SET 0xc
7023#define COMMAND_REG_ATTN_BITS_CLR 0x10
7024#define COMMAND_REG_COALESCE_NOW 0x14
7025#define COMMAND_REG_SIMD_MASK 0x18
7026#define COMMAND_REG_SIMD_NOMASK 0x1c
7027
7028
7029#define IGU_MEM_BASE 0x0000
7030
7031#define IGU_MEM_MSIX_BASE 0x0000
7032#define IGU_MEM_MSIX_UPPER 0x007f
7033#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
7034
7035#define IGU_MEM_PBA_MSIX_BASE 0x0200
7036#define IGU_MEM_PBA_MSIX_UPPER 0x0200
7037
7038#define IGU_CMD_BACKWARD_COMP_PROD_UPD 0x0201
7039#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
7040
7041#define IGU_CMD_INT_ACK_BASE 0x0400
7042#define IGU_CMD_INT_ACK_UPPER\
7043 (IGU_CMD_INT_ACK_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
7044#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x04ff
7045
7046#define IGU_CMD_E2_PROD_UPD_BASE 0x0500
7047#define IGU_CMD_E2_PROD_UPD_UPPER\
7048 (IGU_CMD_E2_PROD_UPD_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
7049#define IGU_CMD_E2_PROD_UPD_RESERVED_UPPER 0x059f
7050
7051#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05a0
7052#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05a1
7053#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05a2
7054
7055#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05a3
7056#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05a4
7057#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05a5
7058#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
7059
7060#define IGU_REG_RESERVED_UPPER 0x05ff
7061/* Fields of the IGU PF CONFIGURATION REGISTER */
7062#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
7063#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
7064#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
7065#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */
7066#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
7067#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
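A minimal sketch (hypothetical helper, not part of the driver) composing a PF configuration word from the flags above:

static inline u32 example_igu_pf_conf(void)	/* hypothetical helper */
{
	/* Enable the function with MSI/MSI-X interrupts and attentions */
	return IGU_PF_CONF_FUNC_EN |
	       IGU_PF_CONF_MSI_MSIX_EN |
	       IGU_PF_CONF_ATTN_BIT_EN;
}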
7068
7069/* Fields of the IGU VF CONFIGURATION REGISTER */
7070#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
7071#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
7072#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */
7073#define IGU_VF_CONF_PARENT_SHIFT 2 /* Parent PF */
7074#define IGU_VF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
7075
7076
7077#define IGU_BC_DSB_NUM_SEGS 5
7078#define IGU_BC_NDSB_NUM_SEGS 2
7079#define IGU_NORM_DSB_NUM_SEGS 2
7080#define IGU_NORM_NDSB_NUM_SEGS 1
7081#define IGU_BC_BASE_DSB_PROD 128
7082#define IGU_NORM_BASE_DSB_PROD 136
7083
7084 /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \
7085 [5:2] = 0; [1:0] = PF number) */
7086#define IGU_FID_ENCODE_IS_PF (0x1<<6)
7087#define IGU_FID_ENCODE_IS_PF_SHIFT 6
7088#define IGU_FID_VF_NUM_MASK (0x3f)
7089#define IGU_FID_PF_NUM_MASK (0x7)
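For illustration, a sketch (hypothetical helpers, not part of the driver) of decoding a FID according to the encoding described above:

static inline bool igu_fid_is_pf(u8 fid)	/* hypothetical helper */
{
	return (fid & IGU_FID_ENCODE_IS_PF) != 0;
}

static inline u8 igu_fid_num(u8 fid)		/* hypothetical helper */
{
	/* PF number in the low bits for a PF, VF number otherwise */
	return igu_fid_is_pf(fid) ? (fid & IGU_FID_PF_NUM_MASK)
				  : (fid & IGU_FID_VF_NUM_MASK);
}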
7090
7091#define IGU_REG_MAPPING_MEMORY_VALID (1<<0)
7092#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK (0x3F<<1)
7093#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT 1
7094#define IGU_REG_MAPPING_MEMORY_FID_MASK (0x7F<<7)
7095#define IGU_REG_MAPPING_MEMORY_FID_SHIFT 7
7096
7097
7098#define CDU_REGION_NUMBER_XCM_AG 2
7099#define CDU_REGION_NUMBER_UCM_AG 4
7100
7101
7102/**
7103 * String-to-compress [31:8] = CID (all 24 bits)
7104 * String-to-compress [7:4] = Region
7105 * String-to-compress [3:0] = Type
7106 */
7107#define CDU_VALID_DATA(_cid, _region, _type)\
7108 (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
7109#define CDU_CRC8(_cid, _region, _type)\
7110 (calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
7111#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type)\
7112 (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
7113#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type)\
7114 (0x80 | (((_type)&0xf) << 3) | ((_crc) & 0x7))
7115#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
7116
7117/******************************************************************************
7118 * Description:
7119 *	 Calculates CRC-8 on a 32-bit word: polynomial 0-1-2-8.
7120 *	 Code was translated from Verilog.
7121 * Return: the computed CRC-8 value.
7122 *****************************************************************************/
7123static inline u8 calc_crc8(u32 data, u8 crc)
7124{
7125 u8 D[32];
7126 u8 NewCRC[8];
7127 u8 C[8];
7128 u8 crc_res;
7129 u8 i;
7130
7131 /* split the data into 32 bits */
7132 for (i = 0; i < 32; i++) {
7133 D[i] = (u8)(data & 1);
7134 data = data >> 1;
7135 }
7136
7137 /* split the crc into 8 bits */
7138 for (i = 0; i < 8; i++) {
7139 C[i] = crc & 1;
7140 crc = crc >> 1;
7141 }
7142
7143 NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
7144 D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
7145 C[6] ^ C[7];
7146 NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
7147 D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
7148 D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^
7149 C[6];
7150 NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
7151 D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
7152 C[0] ^ C[1] ^ C[4] ^ C[5];
7153 NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
7154 D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
7155 C[1] ^ C[2] ^ C[5] ^ C[6];
7156 NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
7157 D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
7158 C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
7159 NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
7160 D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
7161 C[3] ^ C[4] ^ C[7];
7162 NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
7163 D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
7164 C[5];
7165 NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
7166 D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
7167 C[6];
7168
7169 crc_res = 0;
7170 for (i = 0; i < 8; i++)
7171 crc_res |= (NewCRC[i] << i);
7172
7173 return crc_res;
7174}
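A minimal usage sketch of the CDU validation macros and calc_crc8() above; the connection id and context type values are hypothetical:

static inline void cdu_validation_example(void)	/* illustration only */
{
	u32 cid = 0x12345;		/* hypothetical connection id */
	u8 ctx_type = 1;		/* hypothetical context type */
	u8 valid = CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
					  ctx_type);
	/* Clearing bit 7 marks the context as invalid again */
	u8 invalidated = CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(valid);

	(void)invalidated;
}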
7175
7176
7177#endif /* BNX2X_REG_H */
diff --git a/drivers/net/bnx2x/bnx2x_sp.c b/drivers/net/bnx2x/bnx2x_sp.c
new file mode 100644
index 00000000000..df52f110c6c
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_sp.c
@@ -0,0 +1,5692 @@
1/* bnx2x_sp.c: Broadcom Everest network driver.
2 *
3 * Copyright 2011 Broadcom Corporation
4 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Vladislav Zolotarov
17 *
18 */
19#include <linux/module.h>
20#include <linux/crc32.h>
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/crc32c.h>
24#include "bnx2x.h"
25#include "bnx2x_cmn.h"
26#include "bnx2x_sp.h"
27
28#define BNX2X_MAX_EMUL_MULTI 16
29
30/**** Exe Queue interfaces ****/
31
32/**
33 * bnx2x_exe_queue_init - init the Exe Queue object
34 *
35 * @o:		pointer to the object
36 * @exe_len:	length of an execution chunk
37 * @owner:	pointer to the owner
38 * @validate: validate function pointer
39 * @optimize: optimize function pointer
40 * @exec: execute function pointer
41 * @get: get function pointer
42 */
43static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
44 struct bnx2x_exe_queue_obj *o,
45 int exe_len,
46 union bnx2x_qable_obj *owner,
47 exe_q_validate validate,
48 exe_q_optimize optimize,
49 exe_q_execute exec,
50 exe_q_get get)
51{
52 memset(o, 0, sizeof(*o));
53
54 INIT_LIST_HEAD(&o->exe_queue);
55 INIT_LIST_HEAD(&o->pending_comp);
56
57 spin_lock_init(&o->lock);
58
59 o->exe_chunk_len = exe_len;
60 o->owner = owner;
61
62 /* Owner specific callbacks */
63 o->validate = validate;
64 o->optimize = optimize;
65 o->execute = exec;
66 o->get = get;
67
68 DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
69 "length of %d\n", exe_len);
70}
71
72static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
73 struct bnx2x_exeq_elem *elem)
74{
75 DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
76 kfree(elem);
77}
78
79static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
80{
81 struct bnx2x_exeq_elem *elem;
82 int cnt = 0;
83
84 spin_lock_bh(&o->lock);
85
86 list_for_each_entry(elem, &o->exe_queue, link)
87 cnt++;
88
89 spin_unlock_bh(&o->lock);
90
91 return cnt;
92}
93
94/**
95 * bnx2x_exe_queue_add - add a new element to the execution queue
96 *
97 * @bp: driver handle
98 * @o: queue
99 * @elem:	new element to add
100 * @restore: true - do not optimize the command
101 *
102 * If the element is optimized or is illegal, frees it.
103 */
104static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
105 struct bnx2x_exe_queue_obj *o,
106 struct bnx2x_exeq_elem *elem,
107 bool restore)
108{
109 int rc;
110
111 spin_lock_bh(&o->lock);
112
113 if (!restore) {
114		/* Try to cancel this element (optimize it away) */
115 rc = o->optimize(bp, o->owner, elem);
116 if (rc)
117 goto free_and_exit;
118
119 /* Check if this request is ok */
120 rc = o->validate(bp, o->owner, elem);
121 if (rc) {
122 BNX2X_ERR("Preamble failed: %d\n", rc);
123 goto free_and_exit;
124 }
125 }
126
127	/* Add the element to the execution queue */
128 list_add_tail(&elem->link, &o->exe_queue);
129
130 spin_unlock_bh(&o->lock);
131
132 return 0;
133
134free_and_exit:
135 bnx2x_exe_queue_free_elem(bp, elem);
136
137 spin_unlock_bh(&o->lock);
138
139 return rc;
140
141}
142
143static inline void __bnx2x_exe_queue_reset_pending(
144 struct bnx2x *bp,
145 struct bnx2x_exe_queue_obj *o)
146{
147 struct bnx2x_exeq_elem *elem;
148
149 while (!list_empty(&o->pending_comp)) {
150 elem = list_first_entry(&o->pending_comp,
151 struct bnx2x_exeq_elem, link);
152
153 list_del(&elem->link);
154 bnx2x_exe_queue_free_elem(bp, elem);
155 }
156}
157
158static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
159 struct bnx2x_exe_queue_obj *o)
160{
161
162 spin_lock_bh(&o->lock);
163
164 __bnx2x_exe_queue_reset_pending(bp, o);
165
166 spin_unlock_bh(&o->lock);
167
168}
169
170/**
171 * bnx2x_exe_queue_step - execute one execution chunk atomically
172 *
173 * @bp: driver handle
174 * @o: queue
175 * @ramrod_flags: flags
176 *
177 * (Atomicity is ensured using the exe_queue->lock).
178 */
179static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
180 struct bnx2x_exe_queue_obj *o,
181 unsigned long *ramrod_flags)
182{
183 struct bnx2x_exeq_elem *elem, spacer;
184 int cur_len = 0, rc;
185
186 memset(&spacer, 0, sizeof(spacer));
187
188 spin_lock_bh(&o->lock);
189
190 /*
191 * Next step should not be performed until the current is finished,
192 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
193	 * properly clear the object internals without sending any command to
194	 * the FW, which also implies there won't be any completion to clear
195	 * the 'pending' list.
196 */
197 if (!list_empty(&o->pending_comp)) {
198 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
199 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
200 "resetting pending_comp\n");
201 __bnx2x_exe_queue_reset_pending(bp, o);
202 } else {
203 spin_unlock_bh(&o->lock);
204 return 1;
205 }
206 }
207
208 /*
209	 * Run through the pending commands list and create the next
210	 * execution chunk.
211 */
212 while (!list_empty(&o->exe_queue)) {
213 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
214 link);
215 WARN_ON(!elem->cmd_len);
216
217 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
218 cur_len += elem->cmd_len;
219 /*
220			 * Prevent both lists from being empty when moving an
221			 * element. This allows bnx2x_exe_queue_empty() to be
222			 * called without locking.
223 */
224 list_add_tail(&spacer.link, &o->pending_comp);
225 mb();
226 list_del(&elem->link);
227 list_add_tail(&elem->link, &o->pending_comp);
228 list_del(&spacer.link);
229 } else
230 break;
231 }
232
233 /* Sanity check */
234 if (!cur_len) {
235 spin_unlock_bh(&o->lock);
236 return 0;
237 }
238
239 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
240 if (rc < 0)
241 /*
242		 * In case of an error, return the commands to the queue
243		 * and reset the pending_comp list.
244 */
245 list_splice_init(&o->pending_comp, &o->exe_queue);
246 else if (!rc)
247 /*
248		 * If zero is returned, there are no outstanding pending
249		 * completions and we may dismiss the pending list.
250 */
251 __bnx2x_exe_queue_reset_pending(bp, o);
252
253 spin_unlock_bh(&o->lock);
254 return rc;
255}
256
257static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
258{
259 bool empty = list_empty(&o->exe_queue);
260
261 /* Don't reorder!!! */
262 mb();
263
264 return empty && list_empty(&o->pending_comp);
265}
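To show how the helpers above fit together, a hedged sketch (not part of the driver) of draining the queue; a real caller would wait for ramrod completions between steps rather than bail out:

static void example_drain_exe_queue(struct bnx2x *bp,
				    struct bnx2x_exe_queue_obj *o,
				    unsigned long *ramrod_flags)
{
	int rc;

	while (!bnx2x_exe_queue_empty(o)) {
		rc = bnx2x_exe_queue_step(bp, o, ramrod_flags);
		if (rc < 0)
			break;		/* execution error */
		if (rc > 0)
			break;		/* previous chunk still pending */
	}
}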
266
267static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
268 struct bnx2x *bp)
269{
270 DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
271 return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
272}
273
274/************************ raw_obj functions ***********************************/
275static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
276{
277 return !!test_bit(o->state, o->pstate);
278}
279
280static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
281{
282 smp_mb__before_clear_bit();
283 clear_bit(o->state, o->pstate);
284 smp_mb__after_clear_bit();
285}
286
287static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
288{
289 smp_mb__before_clear_bit();
290 set_bit(o->state, o->pstate);
291 smp_mb__after_clear_bit();
292}
293
294/**
295 * bnx2x_state_wait - wait until the given bit(state) is cleared
296 *
297 * @bp: device handle
298 * @state: state which is to be cleared
299 * @pstate:	pointer to the state buffer
300 *
301 */
302static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
303 unsigned long *pstate)
304{
305 /* can take a while if any port is running */
306 int cnt = 5000;
307
308
309 if (CHIP_REV_IS_EMUL(bp))
310 cnt *= 20;
311
312 DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
313
314 might_sleep();
315 while (cnt--) {
316 if (!test_bit(state, pstate)) {
317#ifdef BNX2X_STOP_ON_ERROR
318 DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
319#endif
320 return 0;
321 }
322
323 usleep_range(1000, 1000);
324
325 if (bp->panic)
326 return -EIO;
327 }
328
329 /* timeout! */
330 BNX2X_ERR("timeout waiting for state %d\n", state);
331#ifdef BNX2X_STOP_ON_ERROR
332 bnx2x_panic();
333#endif
334
335 return -EBUSY;
336}
337
338static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
339{
340 return bnx2x_state_wait(bp, raw->state, raw->pstate);
341}
342
343/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
344/* credit handling callbacks */
345static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
346{
347 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
348
349 WARN_ON(!mp);
350
351 return mp->get_entry(mp, offset);
352}
353
354static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
355{
356 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
357
358 WARN_ON(!mp);
359
360 return mp->get(mp, 1);
361}
362
363static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
364{
365 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
366
367 WARN_ON(!vp);
368
369 return vp->get_entry(vp, offset);
370}
371
372static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
373{
374 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
375
376 WARN_ON(!vp);
377
378 return vp->get(vp, 1);
379}
380
381static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
382{
383 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
384 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
385
386 if (!mp->get(mp, 1))
387 return false;
388
389 if (!vp->get(vp, 1)) {
390 mp->put(mp, 1);
391 return false;
392 }
393
394 return true;
395}
396
397static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
398{
399 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
400
401 return mp->put_entry(mp, offset);
402}
403
404static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
405{
406 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
407
408 return mp->put(mp, 1);
409}
410
411static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
412{
413 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
414
415 return vp->put_entry(vp, offset);
416}
417
418static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
419{
420 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
421
422 return vp->put(vp, 1);
423}
424
425static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
426{
427 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
428 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
429
430 if (!mp->put(mp, 1))
431 return false;
432
433 if (!vp->put(vp, 1)) {
434 mp->get(mp, 1);
435 return false;
436 }
437
438 return true;
439}
440
441/* check_add() callbacks */
442static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
443 union bnx2x_classification_ramrod_data *data)
444{
445 struct bnx2x_vlan_mac_registry_elem *pos;
446
447 if (!is_valid_ether_addr(data->mac.mac))
448 return -EINVAL;
449
450 /* Check if a requested MAC already exists */
451 list_for_each_entry(pos, &o->head, link)
452 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
453 return -EEXIST;
454
455 return 0;
456}
457
458static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
459 union bnx2x_classification_ramrod_data *data)
460{
461 struct bnx2x_vlan_mac_registry_elem *pos;
462
463 list_for_each_entry(pos, &o->head, link)
464 if (data->vlan.vlan == pos->u.vlan.vlan)
465 return -EEXIST;
466
467 return 0;
468}
469
470static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
471 union bnx2x_classification_ramrod_data *data)
472{
473 struct bnx2x_vlan_mac_registry_elem *pos;
474
475 list_for_each_entry(pos, &o->head, link)
476 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
477 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
478 ETH_ALEN)))
479 return -EEXIST;
480
481 return 0;
482}
483
484
485/* check_del() callbacks */
486static struct bnx2x_vlan_mac_registry_elem *
487 bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
488 union bnx2x_classification_ramrod_data *data)
489{
490 struct bnx2x_vlan_mac_registry_elem *pos;
491
492 list_for_each_entry(pos, &o->head, link)
493 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
494 return pos;
495
496 return NULL;
497}
498
499static struct bnx2x_vlan_mac_registry_elem *
500 bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
501 union bnx2x_classification_ramrod_data *data)
502{
503 struct bnx2x_vlan_mac_registry_elem *pos;
504
505 list_for_each_entry(pos, &o->head, link)
506 if (data->vlan.vlan == pos->u.vlan.vlan)
507 return pos;
508
509 return NULL;
510}
511
512static struct bnx2x_vlan_mac_registry_elem *
513 bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
514 union bnx2x_classification_ramrod_data *data)
515{
516 struct bnx2x_vlan_mac_registry_elem *pos;
517
518 list_for_each_entry(pos, &o->head, link)
519 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
520 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
521 ETH_ALEN)))
522 return pos;
523
524 return NULL;
525}
526
527/* check_move() callback */
528static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
529 struct bnx2x_vlan_mac_obj *dst_o,
530 union bnx2x_classification_ramrod_data *data)
531{
532 struct bnx2x_vlan_mac_registry_elem *pos;
533 int rc;
534
535 /* Check if we can delete the requested configuration from the first
536 * object.
537 */
538 pos = src_o->check_del(src_o, data);
539
540 /* check if configuration can be added */
541 rc = dst_o->check_add(dst_o, data);
542
543	/* If this classification cannot be added (it is already set)
544	 * or cannot be deleted - return an error.
545 */
546 if (rc || !pos)
547 return false;
548
549 return true;
550}
551
552static bool bnx2x_check_move_always_err(
553 struct bnx2x_vlan_mac_obj *src_o,
554 struct bnx2x_vlan_mac_obj *dst_o,
555 union bnx2x_classification_ramrod_data *data)
556{
557 return false;
558}
559
560
561static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
562{
563 struct bnx2x_raw_obj *raw = &o->raw;
564 u8 rx_tx_flag = 0;
565
566 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
567 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
568 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
569
570 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
571 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
572 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
573
574 return rx_tx_flag;
575}
576
577/* LLH CAM line allocations */
578enum {
579 LLH_CAM_ISCSI_ETH_LINE = 0,
580 LLH_CAM_ETH_LINE,
581 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
582};
583
584static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
585 bool add, unsigned char *dev_addr, int index)
586{
587 u32 wb_data[2];
588 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
589 NIG_REG_LLH0_FUNC_MEM;
590
591 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
592 return;
593
594 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
595 (add ? "ADD" : "DELETE"), index);
596
597 if (add) {
598 /* LLH_FUNC_MEM is a u64 WB register */
599 reg_offset += 8*index;
600
601 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
602 (dev_addr[4] << 8) | dev_addr[5]);
603 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
604
605 REG_WR_DMAE(bp, reg_offset, wb_data, 2);
606 }
607
608 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
609 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
610}
611
612/**
613 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
614 *
615 * @bp: device handle
616 * @o: queue for which we want to configure this rule
617 * @add: if true the command is an ADD command, DEL otherwise
618 * @opcode: CLASSIFY_RULE_OPCODE_XXX
619 * @hdr: pointer to a header to setup
620 *
621 */
622static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
623 struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
624 struct eth_classify_cmd_header *hdr)
625{
626 struct bnx2x_raw_obj *raw = &o->raw;
627
628 hdr->client_id = raw->cl_id;
629 hdr->func_id = raw->func_id;
630
631	/* Rx and/or Tx (internal switching) configuration? */
632 hdr->cmd_general_data |=
633 bnx2x_vlan_mac_get_rx_tx_flag(o);
634
635 if (add)
636 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
637
638 hdr->cmd_general_data |=
639 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
640}
641
642/**
643 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
644 *
645 * @cid: connection id
646 * @type: BNX2X_FILTER_XXX_PENDING
647 * @hdr:	pointer to the header to setup
648 * @rule_cnt:	number of rules in the ramrod data
649 *
650 * Currently we always configure one rule and set the echo field to
651 * contain a CID and an opcode type.
652 */
653static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
654 struct eth_classify_header *hdr, int rule_cnt)
655{
656 hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
657 hdr->rule_cnt = (u8)rule_cnt;
658}
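A sketch of the inverse operation (hypothetical helper, not part of the driver): recovering the CID and opcode type from the echo field set above, using the BNX2X_SWCID_* masks defined elsewhere in the driver:

static inline void example_parse_echo(u32 echo, u32 *cid, int *type)
{
	*cid  = echo & BNX2X_SWCID_MASK;	/* low bits: connection id */
	*type = echo >> BNX2X_SWCID_SHIFT;	/* high bits: opcode type */
}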
659
660
661/* hw_config() callbacks */
662static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
663 struct bnx2x_vlan_mac_obj *o,
664 struct bnx2x_exeq_elem *elem, int rule_idx,
665 int cam_offset)
666{
667 struct bnx2x_raw_obj *raw = &o->raw;
668 struct eth_classify_rules_ramrod_data *data =
669 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
670 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
671 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
672 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
673 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
674 u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
675
676 /*
677 * Set LLH CAM entry: currently only iSCSI and ETH macs are
678 * relevant. In addition, current implementation is tuned for a
679 * single ETH MAC.
680 *
681	 * When a PF configuration with multiple unicast ETH MACs in switch
682	 * independent mode is required (NetQ, multiple netdev MACs,
683	 * etc.), consider better utilisation of the 8 per-function MAC
684	 * entries in the LLH register. There are also the
685	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers, which bring the
686	 * total number of CAM entries to 16.
687 *
688 * Currently we won't configure NIG for MACs other than a primary ETH
689 * MAC and iSCSI L2 MAC.
690 *
691 * If this MAC is moving from one Queue to another, no need to change
692 * NIG configuration.
693 */
694 if (cmd != BNX2X_VLAN_MAC_MOVE) {
695 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
696 bnx2x_set_mac_in_nig(bp, add, mac,
697 LLH_CAM_ISCSI_ETH_LINE);
698 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
699 bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
700 }
701
702 /* Reset the ramrod data buffer for the first rule */
703 if (rule_idx == 0)
704 memset(data, 0, sizeof(*data));
705
706 /* Setup a command header */
707 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
708 &rule_entry->mac.header);
709
710 DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for "
711 "Queue %d\n", (add ? "add" : "delete"),
712 BNX2X_MAC_PRN_LIST(mac), raw->cl_id);
713
714 /* Set a MAC itself */
715 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
716 &rule_entry->mac.mac_mid,
717 &rule_entry->mac.mac_lsb, mac);
718
719 /* MOVE: Add a rule that will add this MAC to the target Queue */
720 if (cmd == BNX2X_VLAN_MAC_MOVE) {
721 rule_entry++;
722 rule_cnt++;
723
724 /* Setup ramrod data */
725 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
726 elem->cmd_data.vlan_mac.target_obj,
727 true, CLASSIFY_RULE_OPCODE_MAC,
728 &rule_entry->mac.header);
729
730 /* Set a MAC itself */
731 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
732 &rule_entry->mac.mac_mid,
733 &rule_entry->mac.mac_lsb, mac);
734 }
735
736 /* Set the ramrod data header */
737	/* TODO: take this to a higher level in order to prevent writing it
738	   multiple times */
739 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
740 rule_cnt);
741}
742
743/**
744 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
745 *
746 * @bp: device handle
747 * @o:		vlan_mac object
748 * @type:	BNX2X_FILTER_XXX_PENDING
749 * @cam_offset: offset in cam memory
750 * @hdr: pointer to a header to setup
751 *
752 * E1/E1H
753 */
754static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
755 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
756 struct mac_configuration_hdr *hdr)
757{
758 struct bnx2x_raw_obj *r = &o->raw;
759
760 hdr->length = 1;
761 hdr->offset = (u8)cam_offset;
762 hdr->client_id = 0xff;
763 hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
764}
765
766static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
767 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
768 u16 vlan_id, struct mac_configuration_entry *cfg_entry)
769{
770 struct bnx2x_raw_obj *r = &o->raw;
771 u32 cl_bit_vec = (1 << r->cl_id);
772
773 cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
774 cfg_entry->pf_id = r->func_id;
775 cfg_entry->vlan_id = cpu_to_le16(vlan_id);
776
777 if (add) {
778 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
779 T_ETH_MAC_COMMAND_SET);
780 SET_FLAG(cfg_entry->flags,
781 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
782
783 /* Set a MAC in a ramrod data */
784 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
785 &cfg_entry->middle_mac_addr,
786 &cfg_entry->lsb_mac_addr, mac);
787 } else
788 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
789 T_ETH_MAC_COMMAND_INVALIDATE);
790}
791
792static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
793 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
794 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
795{
796 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
797 struct bnx2x_raw_obj *raw = &o->raw;
798
799 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
800 &config->hdr);
801 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
802 cfg_entry);
803
804 DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n",
805 (add ? "setting" : "clearing"),
806 BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset);
807}
808
809/**
810 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
811 *
812 * @bp: device handle
813 * @o: bnx2x_vlan_mac_obj
814 * @elem: bnx2x_exeq_elem
815 * @rule_idx:	rule index in the ramrod data buffer
816 * @cam_offset:	offset in the CAM
817 */
818static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
819 struct bnx2x_vlan_mac_obj *o,
820 struct bnx2x_exeq_elem *elem, int rule_idx,
821 int cam_offset)
822{
823 struct bnx2x_raw_obj *raw = &o->raw;
824 struct mac_configuration_cmd *config =
825 (struct mac_configuration_cmd *)(raw->rdata);
826 /*
827	 * 57710 and 57711 do not support the MOVE command,
828	 * so it's either ADD or DEL
829 */
830 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
831 true : false;
832
833 /* Reset the ramrod data buffer */
834 memset(config, 0, sizeof(*config));
835
836 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
837 cam_offset, add,
838 elem->cmd_data.vlan_mac.u.mac.mac, 0,
839 ETH_VLAN_FILTER_ANY_VLAN, config);
840}
841
842static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
843 struct bnx2x_vlan_mac_obj *o,
844 struct bnx2x_exeq_elem *elem, int rule_idx,
845 int cam_offset)
846{
847 struct bnx2x_raw_obj *raw = &o->raw;
848 struct eth_classify_rules_ramrod_data *data =
849 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
850 int rule_cnt = rule_idx + 1;
851 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
852 int cmd = elem->cmd_data.vlan_mac.cmd;
853 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
854 u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
855
856 /* Reset the ramrod data buffer for the first rule */
857 if (rule_idx == 0)
858 memset(data, 0, sizeof(*data));
859
860 /* Set a rule header */
861 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
862 &rule_entry->vlan.header);
863
864 DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
865 vlan);
866
867 /* Set a VLAN itself */
868 rule_entry->vlan.vlan = cpu_to_le16(vlan);
869
870 /* MOVE: Add a rule that will add this MAC to the target Queue */
871 if (cmd == BNX2X_VLAN_MAC_MOVE) {
872 rule_entry++;
873 rule_cnt++;
874
875 /* Setup ramrod data */
876 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
877 elem->cmd_data.vlan_mac.target_obj,
878 true, CLASSIFY_RULE_OPCODE_VLAN,
879 &rule_entry->vlan.header);
880
881 /* Set a VLAN itself */
882 rule_entry->vlan.vlan = cpu_to_le16(vlan);
883 }
884
885 /* Set the ramrod data header */
886	/* TODO: take this to a higher level in order to prevent writing it
887	   multiple times */
888 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
889 rule_cnt);
890}
891
892static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
893 struct bnx2x_vlan_mac_obj *o,
894 struct bnx2x_exeq_elem *elem,
895 int rule_idx, int cam_offset)
896{
897 struct bnx2x_raw_obj *raw = &o->raw;
898 struct eth_classify_rules_ramrod_data *data =
899 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
900 int rule_cnt = rule_idx + 1;
901 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
902 int cmd = elem->cmd_data.vlan_mac.cmd;
903 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
904 u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
905 u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
906
907
908 /* Reset the ramrod data buffer for the first rule */
909 if (rule_idx == 0)
910 memset(data, 0, sizeof(*data));
911
912 /* Set a rule header */
913 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
914 &rule_entry->pair.header);
915
916	/* Set VLAN and MAC themselves */
917 rule_entry->pair.vlan = cpu_to_le16(vlan);
918 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
919 &rule_entry->pair.mac_mid,
920 &rule_entry->pair.mac_lsb, mac);
921
922 /* MOVE: Add a rule that will add this MAC to the target Queue */
923 if (cmd == BNX2X_VLAN_MAC_MOVE) {
924 rule_entry++;
925 rule_cnt++;
926
927 /* Setup ramrod data */
928 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
929 elem->cmd_data.vlan_mac.target_obj,
930 true, CLASSIFY_RULE_OPCODE_PAIR,
931 &rule_entry->pair.header);
932
933		/* Set VLAN and MAC for the target Queue */
934 rule_entry->pair.vlan = cpu_to_le16(vlan);
935 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
936 &rule_entry->pair.mac_mid,
937 &rule_entry->pair.mac_lsb, mac);
938 }
939
940 /* Set the ramrod data header */
941	/* TODO: take this to a higher level in order to prevent writing it
942	   multiple times */
943 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
944 rule_cnt);
945}
946
947/**
948 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
949 *
950 * @bp: device handle
951 * @o: bnx2x_vlan_mac_obj
952 * @elem: bnx2x_exeq_elem
953 * @rule_idx:	rule index in the ramrod data buffer
954 * @cam_offset:	offset in the CAM
955 */
956static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
957 struct bnx2x_vlan_mac_obj *o,
958 struct bnx2x_exeq_elem *elem,
959 int rule_idx, int cam_offset)
960{
961 struct bnx2x_raw_obj *raw = &o->raw;
962 struct mac_configuration_cmd *config =
963 (struct mac_configuration_cmd *)(raw->rdata);
964 /*
965	 * 57710 and 57711 do not support the MOVE command,
966	 * so it's either ADD or DEL
967 */
968 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
969 true : false;
970
971 /* Reset the ramrod data buffer */
972 memset(config, 0, sizeof(*config));
973
974 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
975 cam_offset, add,
976 elem->cmd_data.vlan_mac.u.vlan_mac.mac,
977 elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
978 ETH_VLAN_FILTER_CLASSIFY, config);
979}
980
981#define list_next_entry(pos, member) \
982 list_entry((pos)->member.next, typeof(*(pos)), member)
983
984/**
985 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
986 *
987 * @bp: device handle
988 * @p: command parameters
989 * @ppos: pointer to the cookie
990 *
991 * reconfigure the next MAC/VLAN/VLAN-MAC element from the
992 * previously configured elements list.
993 *
994 * Of the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags
995 * is taken into account.
996 *
997 * The cookie should be given back in the next call to make the function
998 * handle the next element. If *ppos is set to NULL it will restart the
999 * iterator. If the returned *ppos == NULL, the last element has been
1000 * handled.
1001 *
1002 */
1003static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1004 struct bnx2x_vlan_mac_ramrod_params *p,
1005 struct bnx2x_vlan_mac_registry_elem **ppos)
1006{
1007 struct bnx2x_vlan_mac_registry_elem *pos;
1008 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1009
1010 /* If list is empty - there is nothing to do here */
1011 if (list_empty(&o->head)) {
1012 *ppos = NULL;
1013 return 0;
1014 }
1015
1016 /* make a step... */
1017 if (*ppos == NULL)
1018 *ppos = list_first_entry(&o->head,
1019 struct bnx2x_vlan_mac_registry_elem,
1020 link);
1021 else
1022 *ppos = list_next_entry(*ppos, link);
1023
1024 pos = *ppos;
1025
1026 /* If it's the last step - return NULL */
1027 if (list_is_last(&pos->link, &o->head))
1028 *ppos = NULL;
1029
1030 /* Prepare a 'user_req' */
1031 memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1032
1033 /* Set the command */
1034 p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1035
1036 /* Set vlan_mac_flags */
1037 p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1038
1039 /* Set a restore bit */
1040 __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1041
1042 return bnx2x_config_vlan_mac(bp, p);
1043}
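/* Usage sketch (illustrative only, not part of the driver): a caller can
 * restore the whole registry by looping on the cookie until it comes back
 * NULL; 'p' is assumed to be a fully prepared bnx2x_vlan_mac_ramrod_params,
 * e.g. with RAMROD_COMP_WAIT set:
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc < 0)
 *			return rc;
 *	} while (pos);
 */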
1044
1045/*
1046 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1047 * pointer to an element matching the given criteria or NULL if such an
1048 * element hasn't been found.
1049 */
1050static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1051 struct bnx2x_exe_queue_obj *o,
1052 struct bnx2x_exeq_elem *elem)
1053{
1054 struct bnx2x_exeq_elem *pos;
1055 struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1056
1057 /* Check pending for execution commands */
1058 list_for_each_entry(pos, &o->exe_queue, link)
1059 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1060 sizeof(*data)) &&
1061 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1062 return pos;
1063
1064 return NULL;
1065}
1066
1067static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1068 struct bnx2x_exe_queue_obj *o,
1069 struct bnx2x_exeq_elem *elem)
1070{
1071 struct bnx2x_exeq_elem *pos;
1072 struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1073
1074 /* Check pending for execution commands */
1075 list_for_each_entry(pos, &o->exe_queue, link)
1076 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1077 sizeof(*data)) &&
1078 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1079 return pos;
1080
1081 return NULL;
1082}
1083
1084static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1085 struct bnx2x_exe_queue_obj *o,
1086 struct bnx2x_exeq_elem *elem)
1087{
1088 struct bnx2x_exeq_elem *pos;
1089 struct bnx2x_vlan_mac_ramrod_data *data =
1090 &elem->cmd_data.vlan_mac.u.vlan_mac;
1091
1092 /* Check pending for execution commands */
1093 list_for_each_entry(pos, &o->exe_queue, link)
1094 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1095 sizeof(*data)) &&
1096 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1097 return pos;
1098
1099 return NULL;
1100}
1101
1102/**
1103 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1104 *
1105 * @bp: device handle
1106 * @qo: bnx2x_qable_obj
1107 * @elem: bnx2x_exeq_elem
1108 *
1109 * Checks that the requested configuration can be added. If yes and if
1110 * requested, consume CAM credit.
1111 *
1112 * The 'validate' is run after the 'optimize'.
1113 *
1114 */
1115static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1116 union bnx2x_qable_obj *qo,
1117 struct bnx2x_exeq_elem *elem)
1118{
1119 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1120 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1121 int rc;
1122
1123 /* Check the registry */
1124 rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
1125 if (rc) {
1126 DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
1127 "current registry state\n");
1128 return rc;
1129 }
1130
1131 /*
1132 * Check if there is a pending ADD command for this
1133 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1134 */
1135 if (exeq->get(exeq, elem)) {
1136 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1137 return -EEXIST;
1138 }
1139
1140 /*
1141 * TODO: Check the pending MOVE from other objects where this
1142 * object is a destination object.
1143 */
1144
1145	/* Consume the credit unless requested not to */
1146 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1147 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1148 o->get_credit(o)))
1149 return -EINVAL;
1150
1151 return 0;
1152}
1153
1154/**
1155 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1156 *
1157 * @bp: device handle
1158 * @qo:	qable object to check
1159 * @elem: element that needs to be deleted
1160 *
1161 * Checks that the requested configuration can be deleted. If yes and if
1162 * requested, returns a CAM credit.
1163 *
1164 * The 'validate' is run after the 'optimize'.
1165 */
1166static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1167 union bnx2x_qable_obj *qo,
1168 struct bnx2x_exeq_elem *elem)
1169{
1170 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1171 struct bnx2x_vlan_mac_registry_elem *pos;
1172 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1173 struct bnx2x_exeq_elem query_elem;
1174
1175	/* If this classification cannot be deleted (doesn't exist)
1176	 * - return -EEXIST.
1177 */
1178 pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1179 if (!pos) {
1180 DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
1181 "current registry state\n");
1182 return -EEXIST;
1183 }
1184
1185 /*
1186 * Check if there are pending DEL or MOVE commands for this
1187 * MAC/VLAN/VLAN-MAC. Return an error if so.
1188 */
1189 memcpy(&query_elem, elem, sizeof(query_elem));
1190
1191 /* Check for MOVE commands */
1192 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1193 if (exeq->get(exeq, &query_elem)) {
1194 BNX2X_ERR("There is a pending MOVE command already\n");
1195 return -EINVAL;
1196 }
1197
1198 /* Check for DEL commands */
1199 if (exeq->get(exeq, elem)) {
1200 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1201 return -EEXIST;
1202 }
1203
1204	/* Return the credit to the credit pool unless requested not to */
1205 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1206 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1207 o->put_credit(o))) {
1208 BNX2X_ERR("Failed to return a credit\n");
1209 return -EINVAL;
1210 }
1211
1212 return 0;
1213}
1214
1215/**
1216 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1217 *
1218 * @bp: device handle
1219 * @qo:	qable object to check (source)
1220 * @elem: element that needs to be moved
1221 *
1222 * Checks that the requested configuration can be moved. If yes and if
1223 * requested, returns a CAM credit.
1224 *
1225 * The 'validate' is run after the 'optimize'.
1226 */
1227static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1228 union bnx2x_qable_obj *qo,
1229 struct bnx2x_exeq_elem *elem)
1230{
1231 struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1232 struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1233 struct bnx2x_exeq_elem query_elem;
1234 struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1235 struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1236
1237 /*
1238 * Check if we can perform this operation based on the current registry
1239 * state.
1240 */
1241 if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
1242 DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
1243 "current registry state\n");
1244 return -EINVAL;
1245 }
1246
1247 /*
1248 * Check if there is an already pending DEL or MOVE command for the
1249 * source object or ADD command for a destination object. Return an
1250 * error if so.
1251 */
1252 memcpy(&query_elem, elem, sizeof(query_elem));
1253
1254 /* Check DEL on source */
1255 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1256 if (src_exeq->get(src_exeq, &query_elem)) {
1257 BNX2X_ERR("There is a pending DEL command on the source "
1258 "queue already\n");
1259 return -EINVAL;
1260 }
1261
1262 /* Check MOVE on source */
1263 if (src_exeq->get(src_exeq, elem)) {
1264 DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1265 return -EEXIST;
1266 }
1267
1268 /* Check ADD on destination */
1269 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1270 if (dest_exeq->get(dest_exeq, &query_elem)) {
1271 BNX2X_ERR("There is a pending ADD command on the "
1272 "destination queue already\n");
1273 return -EINVAL;
1274 }
1275
1276	/* Consume the credit unless requested not to */
1277 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1278 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1279 dest_o->get_credit(dest_o)))
1280 return -EINVAL;
1281
1282 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1283 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1284 src_o->put_credit(src_o))) {
1285 /* return the credit taken from dest... */
1286 dest_o->put_credit(dest_o);
1287 return -EINVAL;
1288 }
1289
1290 return 0;
1291}
1292
1293static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1294 union bnx2x_qable_obj *qo,
1295 struct bnx2x_exeq_elem *elem)
1296{
1297 switch (elem->cmd_data.vlan_mac.cmd) {
1298 case BNX2X_VLAN_MAC_ADD:
1299 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1300 case BNX2X_VLAN_MAC_DEL:
1301 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1302 case BNX2X_VLAN_MAC_MOVE:
1303 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1304 default:
1305 return -EINVAL;
1306 }
1307}
1308
1309/**
1310 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1311 *
1312 * @bp: device handle
1313 * @o: bnx2x_vlan_mac_obj
1314 *
1315 */
1316static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1317 struct bnx2x_vlan_mac_obj *o)
1318{
1319 int cnt = 5000, rc;
1320 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1321 struct bnx2x_raw_obj *raw = &o->raw;
1322
1323 while (cnt--) {
1324 /* Wait for the current command to complete */
1325 rc = raw->wait_comp(bp, raw);
1326 if (rc)
1327 return rc;
1328
1329 /* Wait until there are no pending commands */
1330 if (!bnx2x_exe_queue_empty(exeq))
1331 usleep_range(1000, 1000);
1332 else
1333 return 0;
1334 }
1335
1336 return -EBUSY;
1337}
1338
1339/**
1340 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1341 *
1342 * @bp: device handle
1343 * @o: bnx2x_vlan_mac_obj
1344 * @cqe:	completion element
1345 * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
1346 *
1347 */
1348static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1349 struct bnx2x_vlan_mac_obj *o,
1350 union event_ring_elem *cqe,
1351 unsigned long *ramrod_flags)
1352{
1353 struct bnx2x_raw_obj *r = &o->raw;
1354 int rc;
1355
1356 /* Reset pending list */
1357 bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1358
1359 /* Clear pending */
1360 r->clear_pending(r);
1361
1362 /* If ramrod failed this is most likely a SW bug */
1363 if (cqe->message.error)
1364 return -EINVAL;
1365
1366	/* Run the next bulk of pending commands if requested */
1367 if (test_bit(RAMROD_CONT, ramrod_flags)) {
1368 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1369 if (rc < 0)
1370 return rc;
1371 }
1372
1373 /* If there is more work to do return PENDING */
1374 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1375 return 1;
1376
1377 return 0;
1378}
1379
1380/**
1381 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1382 *
1383 * @bp: device handle
1384 * @qo:	bnx2x_qable_obj
1385 * @elem: bnx2x_exeq_elem
1386 */
1387static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1388 union bnx2x_qable_obj *qo,
1389 struct bnx2x_exeq_elem *elem)
1390{
1391 struct bnx2x_exeq_elem query, *pos;
1392 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1393 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1394
1395 memcpy(&query, elem, sizeof(query));
1396
1397 switch (elem->cmd_data.vlan_mac.cmd) {
1398 case BNX2X_VLAN_MAC_ADD:
1399 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1400 break;
1401 case BNX2X_VLAN_MAC_DEL:
1402 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1403 break;
1404 default:
1405 /* Don't handle anything other than ADD or DEL */
1406 return 0;
1407 }
1408
1409 /* If we found the appropriate element - delete it */
1410 pos = exeq->get(exeq, &query);
1411 if (pos) {
1412
1413 /* Return the credit of the optimized command */
1414 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1415 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1416 if ((query.cmd_data.vlan_mac.cmd ==
1417 BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1418 BNX2X_ERR("Failed to return the credit for the "
1419 "optimized ADD command\n");
1420 return -EINVAL;
1421 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1422 BNX2X_ERR("Failed to recover the credit from "
1423 "the optimized DEL command\n");
1424 return -EINVAL;
1425 }
1426 }
1427
1428 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1429 (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1430 "ADD" : "DEL");
1431
1432 list_del(&pos->link);
1433 bnx2x_exe_queue_free_elem(bp, pos);
1434 return 1;
1435 }
1436
1437 return 0;
1438}
1439
1440/**
1441 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1442 *
1443 * @bp: device handle
1444 * @o:		vlan_mac object the element belongs to
1445 * @elem:	execution queue element
1446 * @restore:	true if this is a RESTORE flow
1447 * @re:	pointer to the resulting registry element
1448 *
1449 * prepare a registry element according to the current command request.
1450 */
1451static inline int bnx2x_vlan_mac_get_registry_elem(
1452 struct bnx2x *bp,
1453 struct bnx2x_vlan_mac_obj *o,
1454 struct bnx2x_exeq_elem *elem,
1455 bool restore,
1456 struct bnx2x_vlan_mac_registry_elem **re)
1457{
1458 int cmd = elem->cmd_data.vlan_mac.cmd;
1459 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1460
1461 /* Allocate a new registry element if needed. */
1462 if (!restore &&
1463 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1464 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1465 if (!reg_elem)
1466 return -ENOMEM;
1467
1468 /* Get a new CAM offset */
1469 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1470 /*
1471			 * This should never happen, because we have checked the
1472			 * CAM availability in the 'validate'.
1473 */
1474 WARN_ON(1);
1475 kfree(reg_elem);
1476 return -EINVAL;
1477 }
1478
1479 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1480
1481 /* Set a VLAN-MAC data */
1482 memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1483 sizeof(reg_elem->u));
1484
1485 /* Copy the flags (needed for DEL and RESTORE flows) */
1486 reg_elem->vlan_mac_flags =
1487 elem->cmd_data.vlan_mac.vlan_mac_flags;
1488 } else /* DEL, RESTORE */
1489 reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1490
1491 *re = reg_elem;
1492 return 0;
1493}
1494
1495/**
1496 * bnx2x_execute_vlan_mac - execute vlan mac command
1497 *
1498 * @bp: device handle
1499 * @qo:		qable object
1500 * @exe_chunk:	chunk of execution queue elements to process
1501 * @ramrod_flags: execution flags
1502 *
1503 * go and send a ramrod!
1504 */
1505static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1506 union bnx2x_qable_obj *qo,
1507 struct list_head *exe_chunk,
1508 unsigned long *ramrod_flags)
1509{
1510 struct bnx2x_exeq_elem *elem;
1511 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1512 struct bnx2x_raw_obj *r = &o->raw;
1513 int rc, idx = 0;
1514 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1515 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1516 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1517 int cmd;
1518
1519 /*
1520	 * If DRIVER_ONLY execution is requested, clean up the registry
1521	 * and exit. Otherwise send a ramrod to FW.
1522 */
1523 if (!drv_only) {
1524 WARN_ON(r->check_pending(r));
1525
1526 /* Set pending */
1527 r->set_pending(r);
1528
1529		/* Fill the ramrod data */
1530 list_for_each_entry(elem, exe_chunk, link) {
1531 cmd = elem->cmd_data.vlan_mac.cmd;
1532 /*
1533 * We will add to the target object in MOVE command, so
1534 * change the object for a CAM search.
1535 */
1536 if (cmd == BNX2X_VLAN_MAC_MOVE)
1537 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1538 else
1539 cam_obj = o;
1540
1541 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1542 elem, restore,
1543 &reg_elem);
1544 if (rc)
1545 goto error_exit;
1546
1547 WARN_ON(!reg_elem);
1548
1549 /* Push a new entry into the registry */
1550 if (!restore &&
1551 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1552 (cmd == BNX2X_VLAN_MAC_MOVE)))
1553 list_add(&reg_elem->link, &cam_obj->head);
1554
1555 /* Configure a single command in a ramrod data buffer */
1556 o->set_one_rule(bp, o, elem, idx,
1557 reg_elem->cam_offset);
1558
1559 /* MOVE command consumes 2 entries in the ramrod data */
1560 if (cmd == BNX2X_VLAN_MAC_MOVE)
1561 idx += 2;
1562 else
1563 idx++;
1564 }
1565
1566 /*
1567		 * No need for an explicit memory barrier here: the ordering
1568		 * of writing to the SPQ element and the update of the SPQ
1569		 * producer (which involves a memory read) is guaranteed
1570		 * by the full memory barrier taken inside
1571		 * bnx2x_sp_post() itself.
1572 */
1573
1574 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1575 U64_HI(r->rdata_mapping),
1576 U64_LO(r->rdata_mapping),
1577 ETH_CONNECTION_TYPE);
1578 if (rc)
1579 goto error_exit;
1580 }
1581
1582	/* Now that we are done with the ramrod - clean up the registry */
1583 list_for_each_entry(elem, exe_chunk, link) {
1584 cmd = elem->cmd_data.vlan_mac.cmd;
1585 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1586 (cmd == BNX2X_VLAN_MAC_MOVE)) {
1587 reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1588
1589 WARN_ON(!reg_elem);
1590
1591 o->put_cam_offset(o, reg_elem->cam_offset);
1592 list_del(&reg_elem->link);
1593 kfree(reg_elem);
1594 }
1595 }
1596
1597 if (!drv_only)
1598 return 1;
1599 else
1600 return 0;
1601
1602error_exit:
1603 r->clear_pending(r);
1604
1605 /* Cleanup a registry in case of a failure */
1606 list_for_each_entry(elem, exe_chunk, link) {
1607 cmd = elem->cmd_data.vlan_mac.cmd;
1608
1609 if (cmd == BNX2X_VLAN_MAC_MOVE)
1610 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1611 else
1612 cam_obj = o;
1613
1614		/* Delete all entries newly added above */
1615 if (!restore &&
1616 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1617 (cmd == BNX2X_VLAN_MAC_MOVE))) {
1618 reg_elem = o->check_del(cam_obj,
1619 &elem->cmd_data.vlan_mac.u);
1620 if (reg_elem) {
1621 list_del(&reg_elem->link);
1622 kfree(reg_elem);
1623 }
1624 }
1625 }
1626
1627 return rc;
1628}
1629
1630static inline int bnx2x_vlan_mac_push_new_cmd(
1631 struct bnx2x *bp,
1632 struct bnx2x_vlan_mac_ramrod_params *p)
1633{
1634 struct bnx2x_exeq_elem *elem;
1635 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1636 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1637
1638 /* Allocate the execution queue element */
1639 elem = bnx2x_exe_queue_alloc_elem(bp);
1640 if (!elem)
1641 return -ENOMEM;
1642
1643 /* Set the command 'length' */
1644 switch (p->user_req.cmd) {
1645 case BNX2X_VLAN_MAC_MOVE:
1646 elem->cmd_len = 2;
1647 break;
1648 default:
1649 elem->cmd_len = 1;
1650 }
1651
1652 /* Fill the object specific info */
1653 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1654
1655 /* Try to add a new command to the pending list */
1656 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1657}
1658
1659/**
1660 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1661 *
1662 * @bp: device handle
1663 * @p:	command parameters
1664 *
1665 */
1666int bnx2x_config_vlan_mac(
1667 struct bnx2x *bp,
1668 struct bnx2x_vlan_mac_ramrod_params *p)
1669{
1670 int rc = 0;
1671 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1672 unsigned long *ramrod_flags = &p->ramrod_flags;
1673 bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1674 struct bnx2x_raw_obj *raw = &o->raw;
1675
1676 /*
1677 * Add new elements to the execution list for commands that require it.
1678 */
1679 if (!cont) {
1680 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1681 if (rc)
1682 return rc;
1683 }
1684
1685 /*
1686 * If nothing will be executed further in this iteration we want to
1687 * return PENDING if there are pending commands
1688 */
1689 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1690 rc = 1;
1691
1692 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1693 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
1694 "clearing a pending bit.\n");
1695 raw->clear_pending(raw);
1696 }
1697
1698 /* Execute commands if required */
1699 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1700 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1701 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1702 if (rc < 0)
1703 return rc;
1704 }
1705
1706 /*
1707 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1708 * then the user wants to wait until the last command is done.
1709 */
1710 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1711 /*
1712 * Wait maximum for the current exe_queue length iterations plus
1713 * one (for the current pending command).
1714 */
1715 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1716
1717 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1718 max_iterations--) {
1719
1720 /* Wait for the current command to complete */
1721 rc = raw->wait_comp(bp, raw);
1722 if (rc)
1723 return rc;
1724
1725 /* Make a next step */
1726 rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1727 ramrod_flags);
1728 if (rc < 0)
1729 return rc;
1730 }
1731
1732 return 0;
1733 }
1734
1735 return rc;
1736}
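/* Usage sketch (illustrative only, not part of the driver): synchronously
 * add a single MAC; 'mac_obj' and 'addr' are assumed to be an initialized
 * bnx2x_vlan_mac_obj and a 6-byte MAC address respectively:
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *	int rc;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 */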
1737
1738
1739
1740/**
1741 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1742 *
1743 * @bp: device handle
1744 * @o:		vlan_mac object to delete elements from
1745 * @vlan_mac_flags: flags specifying which elements to delete
1746 * @ramrod_flags: execution flags to be used for this deletion
1747 *
1748 * Returns 0 if the last operation has completed successfully and there are
1749 * no more elements left, a positive value if the last operation has
1750 * completed successfully and there are more previously configured elements,
1751 * and a negative value if the current operation has failed.
1752 */
1753static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1754 struct bnx2x_vlan_mac_obj *o,
1755 unsigned long *vlan_mac_flags,
1756 unsigned long *ramrod_flags)
1757{
1758 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1759 int rc = 0;
1760 struct bnx2x_vlan_mac_ramrod_params p;
1761 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1762 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1763
1764 /* Clear pending commands first */
1765
1766 spin_lock_bh(&exeq->lock);
1767
1768 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1769 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1770 *vlan_mac_flags)
1771 list_del(&exeq_pos->link);
1772 }
1773
1774 spin_unlock_bh(&exeq->lock);
1775
1776 /* Prepare a command request */
1777 memset(&p, 0, sizeof(p));
1778 p.vlan_mac_obj = o;
1779 p.ramrod_flags = *ramrod_flags;
1780 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1781
1782 /*
1783	 * Add all the VLAN-MAC entries to the execution queue without actually
1784	 * executing anything.
1785 */
1786 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1787 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1788 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1789
1790 list_for_each_entry(pos, &o->head, link) {
1791 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1792 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1793 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1794 rc = bnx2x_config_vlan_mac(bp, &p);
1795 if (rc < 0) {
1796 BNX2X_ERR("Failed to add a new DEL command\n");
1797 return rc;
1798 }
1799 }
1800 }
1801
1802 p.ramrod_flags = *ramrod_flags;
1803 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1804
1805 return bnx2x_config_vlan_mac(bp, &p);
1806}
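/* Usage sketch (illustrative only, not part of the driver): delete every
 * element tagged with BNX2X_ETH_MAC and wait until the HW is done;
 * 'mac_obj' is assumed to be an initialized bnx2x_vlan_mac_obj:
 *
 *	unsigned long vlan_mac_flags = 0, ramrod_flags = 0;
 *	int rc;
 *
 *	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *
 *	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
 *				 &ramrod_flags);
 */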
1807
1808static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1809 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1810 unsigned long *pstate, bnx2x_obj_type type)
1811{
1812 raw->func_id = func_id;
1813 raw->cid = cid;
1814 raw->cl_id = cl_id;
1815 raw->rdata = rdata;
1816 raw->rdata_mapping = rdata_mapping;
1817 raw->state = state;
1818 raw->pstate = pstate;
1819 raw->obj_type = type;
1820 raw->check_pending = bnx2x_raw_check_pending;
1821 raw->clear_pending = bnx2x_raw_clear_pending;
1822 raw->set_pending = bnx2x_raw_set_pending;
1823 raw->wait_comp = bnx2x_raw_wait;
1824}
1825
1826static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1827 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1828 int state, unsigned long *pstate, bnx2x_obj_type type,
1829 struct bnx2x_credit_pool_obj *macs_pool,
1830 struct bnx2x_credit_pool_obj *vlans_pool)
1831{
1832 INIT_LIST_HEAD(&o->head);
1833
1834 o->macs_pool = macs_pool;
1835 o->vlans_pool = vlans_pool;
1836
1837 o->delete_all = bnx2x_vlan_mac_del_all;
1838 o->restore = bnx2x_vlan_mac_restore;
1839 o->complete = bnx2x_complete_vlan_mac;
1840 o->wait = bnx2x_wait_vlan_mac;
1841
1842 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1843 state, pstate, type);
1844}
1845
1846
1847void bnx2x_init_mac_obj(struct bnx2x *bp,
1848 struct bnx2x_vlan_mac_obj *mac_obj,
1849 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1850 dma_addr_t rdata_mapping, int state,
1851 unsigned long *pstate, bnx2x_obj_type type,
1852 struct bnx2x_credit_pool_obj *macs_pool)
1853{
1854 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1855
1856 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1857 rdata_mapping, state, pstate, type,
1858 macs_pool, NULL);
1859
1860 /* CAM credit pool handling */
1861 mac_obj->get_credit = bnx2x_get_credit_mac;
1862 mac_obj->put_credit = bnx2x_put_credit_mac;
1863 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1864 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1865
1866 if (CHIP_IS_E1x(bp)) {
1867 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1868 mac_obj->check_del = bnx2x_check_mac_del;
1869 mac_obj->check_add = bnx2x_check_mac_add;
1870 mac_obj->check_move = bnx2x_check_move_always_err;
1871 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1872
1873 /* Exe Queue */
1874 bnx2x_exe_queue_init(bp,
1875 &mac_obj->exe_queue, 1, qable_obj,
1876 bnx2x_validate_vlan_mac,
1877 bnx2x_optimize_vlan_mac,
1878 bnx2x_execute_vlan_mac,
1879 bnx2x_exeq_get_mac);
1880 } else {
1881 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1882 mac_obj->check_del = bnx2x_check_mac_del;
1883 mac_obj->check_add = bnx2x_check_mac_add;
1884 mac_obj->check_move = bnx2x_check_move;
1885 mac_obj->ramrod_cmd =
1886 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1887
1888 /* Exe Queue */
1889 bnx2x_exe_queue_init(bp,
1890 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1891 qable_obj, bnx2x_validate_vlan_mac,
1892 bnx2x_optimize_vlan_mac,
1893 bnx2x_execute_vlan_mac,
1894 bnx2x_exeq_get_mac);
1895 }
1896}
1897
1898void bnx2x_init_vlan_obj(struct bnx2x *bp,
1899 struct bnx2x_vlan_mac_obj *vlan_obj,
1900 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1901 dma_addr_t rdata_mapping, int state,
1902 unsigned long *pstate, bnx2x_obj_type type,
1903 struct bnx2x_credit_pool_obj *vlans_pool)
1904{
1905 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1906
1907 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1908 rdata_mapping, state, pstate, type, NULL,
1909 vlans_pool);
1910
1911 vlan_obj->get_credit = bnx2x_get_credit_vlan;
1912 vlan_obj->put_credit = bnx2x_put_credit_vlan;
1913 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1914 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1915
1916 if (CHIP_IS_E1x(bp)) {
1917		BNX2X_ERR("Chips other than E2 and newer are not supported\n");
1918 BUG();
1919 } else {
1920 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
1921 vlan_obj->check_del = bnx2x_check_vlan_del;
1922 vlan_obj->check_add = bnx2x_check_vlan_add;
1923 vlan_obj->check_move = bnx2x_check_move;
1924 vlan_obj->ramrod_cmd =
1925 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1926
1927 /* Exe Queue */
1928 bnx2x_exe_queue_init(bp,
1929 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
1930 qable_obj, bnx2x_validate_vlan_mac,
1931 bnx2x_optimize_vlan_mac,
1932 bnx2x_execute_vlan_mac,
1933 bnx2x_exeq_get_vlan);
1934 }
1935}
1936
1937void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1938 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
1939 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1940 dma_addr_t rdata_mapping, int state,
1941 unsigned long *pstate, bnx2x_obj_type type,
1942 struct bnx2x_credit_pool_obj *macs_pool,
1943 struct bnx2x_credit_pool_obj *vlans_pool)
1944{
1945 union bnx2x_qable_obj *qable_obj =
1946 (union bnx2x_qable_obj *)vlan_mac_obj;
1947
1948 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
1949 rdata_mapping, state, pstate, type,
1950 macs_pool, vlans_pool);
1951
1952 /* CAM pool handling */
1953 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
1954 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
1955 /*
1956 * CAM offset is relevant for 57710 and 57711 chips only which have a
1957 * single CAM for both MACs and VLAN-MAC pairs. So the offset
1958 * will be taken from MACs' pool object only.
1959 */
1960 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1961 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1962
1963 if (CHIP_IS_E1(bp)) {
1964		BNX2X_ERR("E1 chips are not supported\n");
1965 BUG();
1966 } else if (CHIP_IS_E1H(bp)) {
1967 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
1968 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
1969 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
1970 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
1971 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1972
1973 /* Exe Queue */
1974 bnx2x_exe_queue_init(bp,
1975 &vlan_mac_obj->exe_queue, 1, qable_obj,
1976 bnx2x_validate_vlan_mac,
1977 bnx2x_optimize_vlan_mac,
1978 bnx2x_execute_vlan_mac,
1979 bnx2x_exeq_get_vlan_mac);
1980 } else {
1981 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
1982 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
1983 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
1984 vlan_mac_obj->check_move = bnx2x_check_move;
1985 vlan_mac_obj->ramrod_cmd =
1986 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1987
1988 /* Exe Queue */
1989 bnx2x_exe_queue_init(bp,
1990 &vlan_mac_obj->exe_queue,
1991 CLASSIFY_RULES_COUNT,
1992 qable_obj, bnx2x_validate_vlan_mac,
1993 bnx2x_optimize_vlan_mac,
1994 bnx2x_execute_vlan_mac,
1995 bnx2x_exeq_get_vlan_mac);
1996 }
1997
1998}
1999
2000/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2001static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2002 struct tstorm_eth_mac_filter_config *mac_filters,
2003 u16 pf_id)
2004{
2005 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2006
2007 u32 addr = BAR_TSTRORM_INTMEM +
2008 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2009
2010 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2011}
2012
2013static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2014 struct bnx2x_rx_mode_ramrod_params *p)
2015{
2016 /* update the bp MAC filter structure */
2017 u32 mask = (1 << p->cl_id);
2018
2019 struct tstorm_eth_mac_filter_config *mac_filters =
2020 (struct tstorm_eth_mac_filter_config *)p->rdata;
2021
2022	/* initial setting is drop-all */
2023 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2024 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2025 u8 unmatched_unicast = 0;
2026
2027	/* In E1x we only take the Rx accept flags into account since Tx
2028	 * switching isn't enabled. */
2029 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2030 /* accept matched ucast */
2031 drop_all_ucast = 0;
2032
2033 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2034 /* accept matched mcast */
2035 drop_all_mcast = 0;
2036
2037 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2038		/* accept all ucast */
2039 drop_all_ucast = 0;
2040 accp_all_ucast = 1;
2041 }
2042 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2043 /* accept all mcast */
2044 drop_all_mcast = 0;
2045 accp_all_mcast = 1;
2046 }
2047 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2048 /* accept (all) bcast */
2049 accp_all_bcast = 1;
2050 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2051 /* accept unmatched unicasts */
2052 unmatched_unicast = 1;
2053
2054 mac_filters->ucast_drop_all = drop_all_ucast ?
2055 mac_filters->ucast_drop_all | mask :
2056 mac_filters->ucast_drop_all & ~mask;
2057
2058 mac_filters->mcast_drop_all = drop_all_mcast ?
2059 mac_filters->mcast_drop_all | mask :
2060 mac_filters->mcast_drop_all & ~mask;
2061
2062 mac_filters->ucast_accept_all = accp_all_ucast ?
2063 mac_filters->ucast_accept_all | mask :
2064 mac_filters->ucast_accept_all & ~mask;
2065
2066 mac_filters->mcast_accept_all = accp_all_mcast ?
2067 mac_filters->mcast_accept_all | mask :
2068 mac_filters->mcast_accept_all & ~mask;
2069
2070 mac_filters->bcast_accept_all = accp_all_bcast ?
2071 mac_filters->bcast_accept_all | mask :
2072 mac_filters->bcast_accept_all & ~mask;
2073
2074 mac_filters->unmatched_unicast = unmatched_unicast ?
2075 mac_filters->unmatched_unicast | mask :
2076 mac_filters->unmatched_unicast & ~mask;
2077
2078	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2079	   "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2080 mac_filters->ucast_drop_all,
2081 mac_filters->mcast_drop_all,
2082 mac_filters->ucast_accept_all,
2083 mac_filters->mcast_accept_all,
2084 mac_filters->bcast_accept_all);
2085
2086	/* write the MAC filter structure */
2087 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2088
2089 /* The operation is completed */
2090 clear_bit(p->state, p->pstate);
2091 smp_mb__after_clear_bit();
2092
2093 return 0;
2094}
2095
2096/* Setup ramrod data */
2097static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2098 struct eth_classify_header *hdr,
2099 u8 rule_cnt)
2100{
2101 hdr->echo = cid;
2102 hdr->rule_cnt = rule_cnt;
2103}
2104
2105static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2106 unsigned long accept_flags,
2107 struct eth_filter_rules_cmd *cmd,
2108 bool clear_accept_all)
2109{
2110 u16 state;
2111
2112 /* start with 'drop-all' */
2113 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2114 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2115
2116 if (accept_flags) {
2117 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2118 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2119
2120 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2121 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2122
2123 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2124 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2125 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2126 }
2127
2128 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2129 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2130 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2131 }
2132 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2133 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2134
2135 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2136 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2137 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2138 }
2139 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2140 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2141 }
2142
2143 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2144 if (clear_accept_all) {
2145 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2146 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2147 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2148 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2149 }
2150
2151 cmd->state = cpu_to_le16(state);
2152
2153}
2154
2155static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2156 struct bnx2x_rx_mode_ramrod_params *p)
2157{
2158 struct eth_filter_rules_ramrod_data *data = p->rdata;
2159 int rc;
2160 u8 rule_idx = 0;
2161
2162 /* Reset the ramrod data buffer */
2163 memset(data, 0, sizeof(*data));
2164
2165 /* Setup ramrod data */
2166
2167 /* Tx (internal switching) */
2168 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2169 data->rules[rule_idx].client_id = p->cl_id;
2170 data->rules[rule_idx].func_id = p->func_id;
2171
2172 data->rules[rule_idx].cmd_general_data =
2173 ETH_FILTER_RULES_CMD_TX_CMD;
2174
2175 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2176 &(data->rules[rule_idx++]), false);
2177 }
2178
2179 /* Rx */
2180 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2181 data->rules[rule_idx].client_id = p->cl_id;
2182 data->rules[rule_idx].func_id = p->func_id;
2183
2184 data->rules[rule_idx].cmd_general_data =
2185 ETH_FILTER_RULES_CMD_RX_CMD;
2186
2187 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2188 &(data->rules[rule_idx++]), false);
2189 }
2190
2191
2192 /*
2193 * If FCoE Queue configuration has been requested configure the Rx and
2194 * internal switching modes for this queue in separate rules.
2195 *
2196	 * The FCoE queue should never be set to ACCEPT_ALL packets of any sort:
2197	 * MCAST_ALL, UCAST_ALL, BCAST_ALL or UNMATCHED.
2198 */
2199 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2200 /* Tx (internal switching) */
2201 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2202 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2203 data->rules[rule_idx].func_id = p->func_id;
2204
2205 data->rules[rule_idx].cmd_general_data =
2206 ETH_FILTER_RULES_CMD_TX_CMD;
2207
2208 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2209 &(data->rules[rule_idx++]),
2210 true);
2211 }
2212
2213 /* Rx */
2214 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2215 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2216 data->rules[rule_idx].func_id = p->func_id;
2217
2218 data->rules[rule_idx].cmd_general_data =
2219 ETH_FILTER_RULES_CMD_RX_CMD;
2220
2221 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2222 &(data->rules[rule_idx++]),
2223 true);
2224 }
2225 }
2226
2227 /*
2228 * Set the ramrod header (most importantly - number of rules to
2229 * configure).
2230 */
2231 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2232
2233 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
2234 "tx_accept_flags 0x%lx\n",
2235 data->header.rule_cnt, p->rx_accept_flags,
2236 p->tx_accept_flags);
2237
2238 /*
2239	 * No need for an explicit memory barrier here: the ordering of
2240	 * writing to the SPQ element and the update of the SPQ
2241	 * producer (which involves a memory read) is guaranteed
2242	 * by the full memory barrier taken inside
2243	 * bnx2x_sp_post() itself.
2244 */
2245
2246 /* Send a ramrod */
2247 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2248 U64_HI(p->rdata_mapping),
2249 U64_LO(p->rdata_mapping),
2250 ETH_CONNECTION_TYPE);
2251 if (rc)
2252 return rc;
2253
2254 /* Ramrod completion is pending */
2255 return 1;
2256}
2257
2258static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2259 struct bnx2x_rx_mode_ramrod_params *p)
2260{
2261 return bnx2x_state_wait(bp, p->state, p->pstate);
2262}
2263
2264static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2265 struct bnx2x_rx_mode_ramrod_params *p)
2266{
2267 /* Do nothing */
2268 return 0;
2269}
2270
2271int bnx2x_config_rx_mode(struct bnx2x *bp,
2272 struct bnx2x_rx_mode_ramrod_params *p)
2273{
2274 int rc;
2275
2276 /* Configure the new classification in the chip */
2277 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2278 if (rc < 0)
2279 return rc;
2280
2281 /* Wait for a ramrod completion if was requested */
2282 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2283 rc = p->rx_mode_obj->wait_comp(bp, p);
2284 if (rc)
2285 return rc;
2286 }
2287
2288 return rc;
2289}
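/* Usage sketch (illustrative only, not part of the driver): request
 * accept-all-multicast on the Rx path and wait for completion; 'p' is
 * assumed to be an otherwise fully initialized
 * bnx2x_rx_mode_ramrod_params (obj, cid, cl_id, func_id, rdata, state,
 * pstate, ...):
 *
 *	__set_bit(RAMROD_RX, &p.ramrod_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	__set_bit(BNX2X_ACCEPT_MULTICAST, &p.rx_accept_flags);
 *	__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p.rx_accept_flags);
 *
 *	rc = bnx2x_config_rx_mode(bp, &p);
 */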
2290
2291void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2292 struct bnx2x_rx_mode_obj *o)
2293{
2294 if (CHIP_IS_E1x(bp)) {
2295 o->wait_comp = bnx2x_empty_rx_mode_wait;
2296 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2297 } else {
2298 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2299 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2300 }
2301}
2302
2303/********************* Multicast verbs: SET, CLEAR ****************************/
2304static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2305{
2306 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2307}
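/* Illustration (a sketch): the approximate-match bin is simply the top
 * byte of the little-endian CRC32c of the MAC, i.e. a value in [0, 255]:
 *
 *	u8 mcast[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
 *	u8 bin = bnx2x_mcast_bin_from_mac(mcast);  (one of 256 bins)
 */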
2308
2309struct bnx2x_mcast_mac_elem {
2310 struct list_head link;
2311 u8 mac[ETH_ALEN];
2312 u8 pad[2]; /* For a natural alignment of the following buffer */
2313};
2314
2315struct bnx2x_pending_mcast_cmd {
2316 struct list_head link;
2317 int type; /* BNX2X_MCAST_CMD_X */
2318 union {
2319 struct list_head macs_head;
2320 u32 macs_num; /* Needed for DEL command */
2321		int next_bin; /* Needed for RESTORE flow with approximate match */
2322 } data;
2323
2324 bool done; /* set to true, when the command has been handled,
2325 * practically used in 57712 handling only, where one pending
2326 * command may be handled in a few operations. As long as for
2327 * other chips every operation handling is completed in a
2328 * single ramrod, there is no need to utilize this field.
2329 */
2330};
2331
2332static int bnx2x_mcast_wait(struct bnx2x *bp,
2333 struct bnx2x_mcast_obj *o)
2334{
2335 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2336 o->raw.wait_comp(bp, &o->raw))
2337 return -EBUSY;
2338
2339 return 0;
2340}
2341
2342static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2343 struct bnx2x_mcast_obj *o,
2344 struct bnx2x_mcast_ramrod_params *p,
2345 int cmd)
2346{
2347 int total_sz;
2348 struct bnx2x_pending_mcast_cmd *new_cmd;
2349 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2350 struct bnx2x_mcast_list_elem *pos;
2351 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2352 p->mcast_list_len : 0);
2353
2354 /* If the command is empty ("handle pending commands only"), break */
2355 if (!p->mcast_list_len)
2356 return 0;
2357
2358 total_sz = sizeof(*new_cmd) +
2359 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2360
2361 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2362 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2363
2364 if (!new_cmd)
2365 return -ENOMEM;
2366
2367 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
2368 "macs_list_len=%d\n", cmd, macs_list_len);
2369
2370 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2371
2372 new_cmd->type = cmd;
2373 new_cmd->done = false;
2374
2375 switch (cmd) {
2376 case BNX2X_MCAST_CMD_ADD:
2377 cur_mac = (struct bnx2x_mcast_mac_elem *)
2378 ((u8 *)new_cmd + sizeof(*new_cmd));
2379
2380		/* Push the MACs of the current command into the pending command
2381 * MACs list: FIFO
2382 */
2383 list_for_each_entry(pos, &p->mcast_list, link) {
2384 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2385 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2386 cur_mac++;
2387 }
2388
2389 break;
2390
2391 case BNX2X_MCAST_CMD_DEL:
2392 new_cmd->data.macs_num = p->mcast_list_len;
2393 break;
2394
2395 case BNX2X_MCAST_CMD_RESTORE:
2396 new_cmd->data.next_bin = 0;
2397 break;
2398
2399 default:
2400 BNX2X_ERR("Unknown command: %d\n", cmd);
2401 return -EINVAL;
2402 }
2403
2404 /* Push the new pending command to the tail of the pending list: FIFO */
2405 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2406
2407 o->set_sched(o);
2408
2409 return 1;
2410}
2411
2412/**
2413 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2414 *
2415 * @o:		multicast object
2416 * @last: index to start looking from (including)
2417 *
2418 * returns the next found (set) bin or a negative value if none is found.
2419 */
2420static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2421{
2422 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2423
2424 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2425 if (o->registry.aprox_match.vec[i])
2426 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2427 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2428 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2429 vec, cur_bit)) {
2430 return cur_bit;
2431 }
2432 }
2433 inner_start = 0;
2434 }
2435
2436 /* None found */
2437 return -1;
2438}
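/* Usage sketch (illustrative only): walk all set bins in the registry,
 * following the same pattern the RESTORE flow below uses:
 *
 *	int bin;
 *
 *	for (bin = bnx2x_mcast_get_next_bin(o, 0); bin >= 0;
 *	     bin = bnx2x_mcast_get_next_bin(o, bin + 1))
 *		DP(BNX2X_MSG_SP, "bin %d is set\n", bin);
 */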
2439
2440/**
2441 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2442 *
2443 * @o:		multicast object
2444 *
2445 * returns the index of the found bin or -1 if none is found
2446 */
2447static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2448{
2449 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2450
2451 if (cur_bit >= 0)
2452 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2453
2454 return cur_bit;
2455}
2456
2457static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2458{
2459 struct bnx2x_raw_obj *raw = &o->raw;
2460 u8 rx_tx_flag = 0;
2461
2462 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2463 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2464 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2465
2466 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2467 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2468 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2469
2470 return rx_tx_flag;
2471}
2472
2473static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2474 struct bnx2x_mcast_obj *o, int idx,
2475 union bnx2x_mcast_config_data *cfg_data,
2476 int cmd)
2477{
2478 struct bnx2x_raw_obj *r = &o->raw;
2479 struct eth_multicast_rules_ramrod_data *data =
2480 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2481 u8 func_id = r->func_id;
2482 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2483 int bin;
2484
2485 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2486 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2487
2488 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2489
2490	/* Get a bin and update the bins' vector */
2491 switch (cmd) {
2492 case BNX2X_MCAST_CMD_ADD:
2493 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2494 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2495 break;
2496
2497 case BNX2X_MCAST_CMD_DEL:
2498 /* If there were no more bins to clear
2499 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2500 * clear any (0xff) bin.
2501 * See bnx2x_mcast_validate_e2() for explanation when it may
2502 * happen.
2503 */
2504 bin = bnx2x_mcast_clear_first_bin(o);
2505 break;
2506
2507 case BNX2X_MCAST_CMD_RESTORE:
2508 bin = cfg_data->bin;
2509 break;
2510
2511 default:
2512 BNX2X_ERR("Unknown command: %d\n", cmd);
2513 return;
2514 }
2515
2516 DP(BNX2X_MSG_SP, "%s bin %d\n",
2517 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2518 "Setting" : "Clearing"), bin);
2519
2520 data->rules[idx].bin_id = (u8)bin;
2521 data->rules[idx].func_id = func_id;
2522 data->rules[idx].engine_id = o->engine_id;
2523}
2524
2525/**
2526 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2527 *
2528 * @bp: device handle
2529 * @o:		multicast object
2530 * @start_bin: index in the registry to start from (including)
2531 * @rdata_idx: index in the ramrod data to start from
2532 *
2533 * returns last handled bin index or -1 if all bins have been handled
2534 */
2535static inline int bnx2x_mcast_handle_restore_cmd_e2(
2536	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
2537 int *rdata_idx)
2538{
2539 int cur_bin, cnt = *rdata_idx;
2540 union bnx2x_mcast_config_data cfg_data = {0};
2541
2542 /* go through the registry and configure the bins from it */
2543 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2544 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2545
2546 cfg_data.bin = (u8)cur_bin;
2547 o->set_one_rule(bp, o, cnt, &cfg_data,
2548 BNX2X_MCAST_CMD_RESTORE);
2549
2550 cnt++;
2551
2552 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2553
2554 /* Break if we reached the maximum number
2555 * of rules.
2556 */
2557 if (cnt >= o->max_cmd_len)
2558 break;
2559 }
2560
2561 *rdata_idx = cnt;
2562
2563 return cur_bin;
2564}
2565
2566static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2567 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2568 int *line_idx)
2569{
2570 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2571 int cnt = *line_idx;
2572 union bnx2x_mcast_config_data cfg_data = {0};
2573
2574 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2575 link) {
2576
2577 cfg_data.mac = &pmac_pos->mac[0];
2578 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2579
2580 cnt++;
2581
2582 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
2583 " mcast MAC\n",
2584 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
2585
2586 list_del(&pmac_pos->link);
2587
2588 /* Break if we reached the maximum number
2589 * of rules.
2590 */
2591 if (cnt >= o->max_cmd_len)
2592 break;
2593 }
2594
2595 *line_idx = cnt;
2596
2597 /* if no more MACs to configure - we are done */
2598 if (list_empty(&cmd_pos->data.macs_head))
2599 cmd_pos->done = true;
2600}
2601
2602static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2603 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2604 int *line_idx)
2605{
2606 int cnt = *line_idx;
2607
2608 while (cmd_pos->data.macs_num) {
2609 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2610
2611 cnt++;
2612
2613 cmd_pos->data.macs_num--;
2614
2615		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2616 cmd_pos->data.macs_num, cnt);
2617
2618 /* Break if we reached the maximum
2619 * number of rules.
2620 */
2621 if (cnt >= o->max_cmd_len)
2622 break;
2623 }
2624
2625 *line_idx = cnt;
2626
2627	/* If there are no more MACs to delete - we are done */
2628 if (!cmd_pos->data.macs_num)
2629 cmd_pos->done = true;
2630}
2631
2632static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2633 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2634 int *line_idx)
2635{
2636 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2637 line_idx);
2638
2639 if (cmd_pos->data.next_bin < 0)
2640		/* If o->hdl_restore returned -1 we are done */
2641 cmd_pos->done = true;
2642 else
2643 /* Start from the next bin next time */
2644 cmd_pos->data.next_bin++;
2645}
2646
2647static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2648 struct bnx2x_mcast_ramrod_params *p)
2649{
2650 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2651 int cnt = 0;
2652 struct bnx2x_mcast_obj *o = p->mcast_obj;
2653
2654 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2655 link) {
2656 switch (cmd_pos->type) {
2657 case BNX2X_MCAST_CMD_ADD:
2658 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2659 break;
2660
2661 case BNX2X_MCAST_CMD_DEL:
2662 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2663 break;
2664
2665 case BNX2X_MCAST_CMD_RESTORE:
2666 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2667 &cnt);
2668 break;
2669
2670 default:
2671 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2672 return -EINVAL;
2673 }
2674
2675 /* If the command has been completed - remove it from the list
2676 * and free the memory
2677 */
2678 if (cmd_pos->done) {
2679 list_del(&cmd_pos->link);
2680 kfree(cmd_pos);
2681 }
2682
2683 /* Break if we reached the maximum number of rules */
2684 if (cnt >= o->max_cmd_len)
2685 break;
2686 }
2687
2688 return cnt;
2689}
2690
2691static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2692 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2693 int *line_idx)
2694{
2695 struct bnx2x_mcast_list_elem *mlist_pos;
2696 union bnx2x_mcast_config_data cfg_data = {0};
2697 int cnt = *line_idx;
2698
2699 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2700 cfg_data.mac = mlist_pos->mac;
2701 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2702
2703 cnt++;
2704
2705 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
2706 " mcast MAC\n",
2707 BNX2X_MAC_PRN_LIST(mlist_pos->mac));
2708 }
2709
2710 *line_idx = cnt;
2711}
2712
2713static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2714 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2715 int *line_idx)
2716{
2717 int cnt = *line_idx, i;
2718
2719 for (i = 0; i < p->mcast_list_len; i++) {
2720 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2721
2722 cnt++;
2723
2724 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2725 p->mcast_list_len - i - 1);
2726 }
2727
2728 *line_idx = cnt;
2729}
2730
2731/**
2732 * bnx2x_mcast_handle_current_cmd - send the current command if there is room
2733 *
2734 * @bp: device handle
2735 * @p: ramrod mcast parameters
2736 * @cmd: command to handle
2737 * @start_cnt: first line in the ramrod data that may be used
2738 *
2739 * This function is called iff there is enough room for the current command in
2740 * the ramrod data.
2741 * Returns number of lines filled in the ramrod data in total.
2742 */
2743static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2744 struct bnx2x_mcast_ramrod_params *p, int cmd,
2745 int start_cnt)
2746{
2747 struct bnx2x_mcast_obj *o = p->mcast_obj;
2748 int cnt = start_cnt;
2749
2750 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2751
2752 switch (cmd) {
2753 case BNX2X_MCAST_CMD_ADD:
2754 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2755 break;
2756
2757 case BNX2X_MCAST_CMD_DEL:
2758 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2759 break;
2760
2761 case BNX2X_MCAST_CMD_RESTORE:
2762 o->hdl_restore(bp, o, 0, &cnt);
2763 break;
2764
2765 default:
2766 BNX2X_ERR("Unknown command: %d\n", cmd);
2767 return -EINVAL;
2768 }
2769
2770 /* The current command has been handled */
2771 p->mcast_list_len = 0;
2772
2773 return cnt;
2774}
2775
2776static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2777 struct bnx2x_mcast_ramrod_params *p,
2778 int cmd)
2779{
2780 struct bnx2x_mcast_obj *o = p->mcast_obj;
2781 int reg_sz = o->get_registry_size(o);
2782
2783 switch (cmd) {
2784 /* DEL command deletes all currently configured MACs */
2785 case BNX2X_MCAST_CMD_DEL:
2786 o->set_registry_size(o, 0);
2787		/* Don't break: fall through */
2788
2789 /* RESTORE command will restore the entire multicast configuration */
2790 case BNX2X_MCAST_CMD_RESTORE:
2791		/* Here we set an upper bound on the amount of work to do;
2792		 * it may in fact be less, as MACs from postponed ADD
2793		 * command(s) scheduled before this command may fall into
2794		 * the same bin, so the actual number of bins set in the
2795		 * registry may be smaller than estimated here. See
2796		 * bnx2x_mcast_set_one_rule_e2() for further details.
2797		 */
2798 p->mcast_list_len = reg_sz;
2799 break;
2800
2801 case BNX2X_MCAST_CMD_ADD:
2802 case BNX2X_MCAST_CMD_CONT:
2803 /* Here we assume that all new MACs will fall into new bins.
2804 * However we will correct the real registry size after we
2805 * handle all pending commands.
2806 */
2807 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2808 break;
2809
2810 default:
2811 BNX2X_ERR("Unknown command: %d\n", cmd);
2812 return -EINVAL;
2813
2814 }
2815
2816 /* Increase the total number of MACs pending to be configured */
2817 o->total_pending_num += p->mcast_list_len;
2818
2819 return 0;
2820}
2821
2822static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2823 struct bnx2x_mcast_ramrod_params *p,
2824 int old_num_bins)
2825{
2826 struct bnx2x_mcast_obj *o = p->mcast_obj;
2827
2828 o->set_registry_size(o, old_num_bins);
2829 o->total_pending_num -= p->mcast_list_len;
2830}
2831
2832/**
2833 * bnx2x_mcast_set_rdata_hdr_e2 - set the ramrod data header values
2834 *
2835 * @bp: device handle
2836 * @p: ramrod mcast parameters
2837 * @len: number of rules to handle
2838 */
2839static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2840 struct bnx2x_mcast_ramrod_params *p,
2841 u8 len)
2842{
2843 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2844 struct eth_multicast_rules_ramrod_data *data =
2845 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2846
2847 data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2848 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2849 data->header.rule_cnt = len;
2850}
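/* Worked illustration of the echo encoding above (not part of the original
 * driver; the shift value here is an assumption made for the example's
 * sake). With BNX2X_SWCID_SHIFT == 17, a cid of 0x10 would give
 *
 *	echo = 0x10 | (BNX2X_FILTER_MCAST_PENDING << 17)
 *
 * and a completion handler can recover both fields by masking with
 * BNX2X_SWCID_MASK and shifting right by BNX2X_SWCID_SHIFT.
 */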
2851
2852/**
2853 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2854 *
2855 * @bp: device handle
2856 * @o: multicast object
2857 *
2858 * Recalculate the actual number of set bins in the registry using Brian
2859 * Kernighan's algorithm: its running time is proportional to the number of
2860 * set bins rather than to the total vector size.
2861 * returns 0 to comply with the bnx2x_mcast_refresh_registry_e1() prototype.
2862 */
2863static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2864 struct bnx2x_mcast_obj *o)
2865{
2866 int i, cnt = 0;
2867 u64 elem;
2868
2869 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2870 elem = o->registry.aprox_match.vec[i];
2871 for (; elem; cnt++)
2872 elem &= elem - 1;
2873 }
2874
2875 o->set_registry_size(o, cnt);
2876
2877 return 0;
2878}
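/* Worked example of the Kernighan bit-count loop above (illustration only,
 * not driver code): elem & (elem - 1) clears the lowest set bit on each
 * iteration, so for elem == 0b101100 the loop walks
 *
 *	0b101100 -> 0b101000 -> 0b100000 -> 0b000000
 *
 * i.e. exactly three iterations - one per set bin - instead of one per bit
 * position of the 64-bit vector element.
 */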
2879
2880static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2881 struct bnx2x_mcast_ramrod_params *p,
2882 int cmd)
2883{
2884 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2885 struct bnx2x_mcast_obj *o = p->mcast_obj;
2886 struct eth_multicast_rules_ramrod_data *data =
2887 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2888 int cnt = 0, rc;
2889
2890 /* Reset the ramrod data buffer */
2891 memset(data, 0, sizeof(*data));
2892
2893 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2894
2895 /* If there are no more pending commands - clear SCHEDULED state */
2896 if (list_empty(&o->pending_cmds_head))
2897 o->clear_sched(o);
2898
2899 /* The below may be true iff there was enough room in ramrod
2900 * data for all pending commands and for the current
2901 * command. Otherwise the current command would have been added
2902 * to the pending commands and p->mcast_list_len would have been
2903 * zeroed.
2904 */
2905 if (p->mcast_list_len > 0)
2906 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2907
2908 /* We've pulled out some MACs - update the total number of
2909 * outstanding.
2910 */
2911 o->total_pending_num -= cnt;
2912
2913 /* send a ramrod */
2914 WARN_ON(o->total_pending_num < 0);
2915 WARN_ON(cnt > o->max_cmd_len);
2916
2917 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2918
2919 /* Update a registry size if there are no more pending operations.
2920 *
2921 * We don't want to change the value of the registry size if there are
2922 * pending operations because we want it to always be equal to the
2923 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2924 * set bins after the last requested operation in order to properly
2925 * evaluate the size of the next DEL/RESTORE operation.
2926 *
2927 * Note that we update the registry itself during command(s) handling
2928 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
2929 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2930 * with a limited amount of update commands (per MAC/bin) and we don't
2931 * know in this scope what the actual state of bins configuration is
2932 * going to be after this ramrod.
2933 */
2934 if (!o->total_pending_num)
2935 bnx2x_mcast_refresh_registry_e2(bp, o);
2936
2937 /*
2938 * If CLEAR_ONLY was requested - don't send a ramrod and clear
2939 * RAMROD_PENDING status immediately.
2940 */
2941 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2942 raw->clear_pending(raw);
2943 return 0;
2944 } else {
2945		/*
2946		 * No need for an explicit memory barrier here: the ordering of
2947		 * writing to the SPQ element vs. updating the SPQ producer is
2948		 * what matters, and since the producer update involves a memory
2949		 * read, a full memory barrier is already placed there
2950		 * (inside bnx2x_sp_post()).
2951		 */
2952
2953 /* Send a ramrod */
2954 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2955 raw->cid, U64_HI(raw->rdata_mapping),
2956 U64_LO(raw->rdata_mapping),
2957 ETH_CONNECTION_TYPE);
2958 if (rc)
2959 return rc;
2960
2961 /* Ramrod completion is pending */
2962 return 1;
2963 }
2964}
2965
2966static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
2967 struct bnx2x_mcast_ramrod_params *p,
2968 int cmd)
2969{
2970	/* Mark that there is work to do */
2971 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2972 p->mcast_list_len = 1;
2973
2974 return 0;
2975}
2976
2977static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
2978 struct bnx2x_mcast_ramrod_params *p,
2979 int old_num_bins)
2980{
2981 /* Do nothing */
2982}
2983
2984#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
2985do { \
2986 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2987} while (0)
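/* Illustration of the macro above (not driver code): the 57711 approximate
 * match table is kept as an array of u32 words, so for bit == 37 the macro
 * touches filter[37 >> 5] == filter[1] and ORs in 1 << (37 & 0x1f), i.e.
 * bit 5 of the second word.
 */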
2988
2989static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
2990 struct bnx2x_mcast_obj *o,
2991 struct bnx2x_mcast_ramrod_params *p,
2992 u32 *mc_filter)
2993{
2994 struct bnx2x_mcast_list_elem *mlist_pos;
2995 int bit;
2996
2997 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2998 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
2999 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3000
3001 DP(BNX2X_MSG_SP, "About to configure "
3002 BNX2X_MAC_FMT" mcast MAC, bin %d\n",
3003 BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit);
3004
3005 /* bookkeeping... */
3006 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3007 bit);
3008 }
3009}
3010
3011static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3012 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3013 u32 *mc_filter)
3014{
3015 int bit;
3016
3017 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3018 bit >= 0;
3019 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3020 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3021 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3022 }
3023}
3024
3025/* On 57711 we write the multicast MACs' approximate match
3026 * table directly into the TSTORM internal RAM, so no special
3027 * tricks are needed to make it work.
3028 */
3029static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3030 struct bnx2x_mcast_ramrod_params *p,
3031 int cmd)
3032{
3033 int i;
3034 struct bnx2x_mcast_obj *o = p->mcast_obj;
3035 struct bnx2x_raw_obj *r = &o->raw;
3036
3037 /* If CLEAR_ONLY has been requested - clear the registry
3038 * and clear a pending bit.
3039 */
3040 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3041 u32 mc_filter[MC_HASH_SIZE] = {0};
3042
3043 /* Set the multicast filter bits before writing it into
3044 * the internal memory.
3045 */
3046 switch (cmd) {
3047 case BNX2X_MCAST_CMD_ADD:
3048 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3049 break;
3050
3051 case BNX2X_MCAST_CMD_DEL:
3052 DP(BNX2X_MSG_SP, "Invalidating multicast "
3053 "MACs configuration\n");
3054
3055 /* clear the registry */
3056 memset(o->registry.aprox_match.vec, 0,
3057 sizeof(o->registry.aprox_match.vec));
3058 break;
3059
3060 case BNX2X_MCAST_CMD_RESTORE:
3061 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3062 break;
3063
3064 default:
3065 BNX2X_ERR("Unknown command: %d\n", cmd);
3066 return -EINVAL;
3067 }
3068
3069 /* Set the mcast filter in the internal memory */
3070 for (i = 0; i < MC_HASH_SIZE; i++)
3071 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3072 } else
3073 /* clear the registry */
3074 memset(o->registry.aprox_match.vec, 0,
3075 sizeof(o->registry.aprox_match.vec));
3076
3077 /* We are done */
3078 r->clear_pending(r);
3079
3080 return 0;
3081}
3082
3083static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3084 struct bnx2x_mcast_ramrod_params *p,
3085 int cmd)
3086{
3087 struct bnx2x_mcast_obj *o = p->mcast_obj;
3088 int reg_sz = o->get_registry_size(o);
3089
3090 switch (cmd) {
3091 /* DEL command deletes all currently configured MACs */
3092 case BNX2X_MCAST_CMD_DEL:
3093 o->set_registry_size(o, 0);
3094		/* Don't break: fall through */
3095
3096 /* RESTORE command will restore the entire multicast configuration */
3097 case BNX2X_MCAST_CMD_RESTORE:
3098 p->mcast_list_len = reg_sz;
3099 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3100 cmd, p->mcast_list_len);
3101 break;
3102
3103 case BNX2X_MCAST_CMD_ADD:
3104 case BNX2X_MCAST_CMD_CONT:
3105		/* Multicast MACs on 57710 are configured as unicast MACs,
3106		 * and only a limited number of CAM entries is available
3107		 * for them.
3108		 */
3109		if (p->mcast_list_len > o->max_cmd_len) {
3110			BNX2X_ERR("Can't configure more than %d multicast MACs "
3111				  "on 57710\n", o->max_cmd_len);
3112 return -EINVAL;
3113 }
3114		/* Every configured MAC should be cleared if a DEL command is
3115		 * called. Only the last ADD command is relevant, since
3116		 * every ADD command overrides the previous configuration.
3117		 */
3118 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3119 if (p->mcast_list_len > 0)
3120 o->set_registry_size(o, p->mcast_list_len);
3121
3122 break;
3123
3124 default:
3125 BNX2X_ERR("Unknown command: %d\n", cmd);
3126 return -EINVAL;
3127
3128 }
3129
3130 /* We want to ensure that commands are executed one by one for 57710.
3131	 * Therefore each non-empty command will consume o->max_cmd_len.
3132 */
3133 if (p->mcast_list_len)
3134 o->total_pending_num += o->max_cmd_len;
3135
3136 return 0;
3137}
3138
3139static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3140 struct bnx2x_mcast_ramrod_params *p,
3141 int old_num_macs)
3142{
3143 struct bnx2x_mcast_obj *o = p->mcast_obj;
3144
3145 o->set_registry_size(o, old_num_macs);
3146
3147	/* If the current command hasn't been handled yet and we got
3148	 * here, it is meant to be dropped, so we have to update the
3149	 * number of outstanding MACs accordingly.
3150	 */
3151 if (p->mcast_list_len)
3152 o->total_pending_num -= o->max_cmd_len;
3153}
3154
3155static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3156 struct bnx2x_mcast_obj *o, int idx,
3157 union bnx2x_mcast_config_data *cfg_data,
3158 int cmd)
3159{
3160 struct bnx2x_raw_obj *r = &o->raw;
3161 struct mac_configuration_cmd *data =
3162 (struct mac_configuration_cmd *)(r->rdata);
3163
3164 /* copy mac */
3165 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3166 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3167 &data->config_table[idx].middle_mac_addr,
3168 &data->config_table[idx].lsb_mac_addr,
3169 cfg_data->mac);
3170
3171 data->config_table[idx].vlan_id = 0;
3172 data->config_table[idx].pf_id = r->func_id;
3173 data->config_table[idx].clients_bit_vector =
3174 cpu_to_le32(1 << r->cl_id);
3175
3176 SET_FLAG(data->config_table[idx].flags,
3177 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3178 T_ETH_MAC_COMMAND_SET);
3179 }
3180}
3181
3182/**
3183 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3184 *
3185 * @bp: device handle
3186 * @p: ramrod mcast parameters
3187 * @len: number of rules to handle
3188 */
3189static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3190 struct bnx2x_mcast_ramrod_params *p,
3191 u8 len)
3192{
3193 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3194 struct mac_configuration_cmd *data =
3195 (struct mac_configuration_cmd *)(r->rdata);
3196
3197 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3198 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3199 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3200
3201 data->hdr.offset = offset;
3202 data->hdr.client_id = 0xff;
3203 data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3204 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3205 data->hdr.length = len;
3206}
3207
3208/**
3209 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3210 *
3211 * @bp: device handle
3212 * @o: multicast object
3213 * @start_idx: index in the registry to start from
3214 * @rdata_idx: index in the ramrod data to start from
3215 *
3216 * The restore command on 57710 is, like all other commands, always a
3217 * standalone command - start_idx and rdata_idx will always be 0. This
3218 * function always succeeds.
3219 * returns -1 to comply with the 57712 variant.
3220 */
3221static inline int bnx2x_mcast_handle_restore_cmd_e1(
3222	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3223 int *rdata_idx)
3224{
3225 struct bnx2x_mcast_mac_elem *elem;
3226 int i = 0;
3227 union bnx2x_mcast_config_data cfg_data = {0};
3228
3229 /* go through the registry and configure the MACs from it. */
3230 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3231 cfg_data.mac = &elem->mac[0];
3232 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3233
3234 i++;
3235
3236 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
3237 " mcast MAC\n",
3238 BNX2X_MAC_PRN_LIST(cfg_data.mac));
3239 }
3240
3241 *rdata_idx = i;
3242
3243 return -1;
3244}
3245
3246
3247static inline int bnx2x_mcast_handle_pending_cmds_e1(
3248 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3249{
3250 struct bnx2x_pending_mcast_cmd *cmd_pos;
3251 struct bnx2x_mcast_mac_elem *pmac_pos;
3252 struct bnx2x_mcast_obj *o = p->mcast_obj;
3253 union bnx2x_mcast_config_data cfg_data = {0};
3254 int cnt = 0;
3255
3256
3257 /* If nothing to be done - return */
3258 if (list_empty(&o->pending_cmds_head))
3259 return 0;
3260
3261 /* Handle the first command */
3262 cmd_pos = list_first_entry(&o->pending_cmds_head,
3263 struct bnx2x_pending_mcast_cmd, link);
3264
3265 switch (cmd_pos->type) {
3266 case BNX2X_MCAST_CMD_ADD:
3267 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3268 cfg_data.mac = &pmac_pos->mac[0];
3269 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3270
3271 cnt++;
3272
3273 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
3274 " mcast MAC\n",
3275 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
3276 }
3277 break;
3278
3279 case BNX2X_MCAST_CMD_DEL:
3280 cnt = cmd_pos->data.macs_num;
3281 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3282 break;
3283
3284 case BNX2X_MCAST_CMD_RESTORE:
3285 o->hdl_restore(bp, o, 0, &cnt);
3286 break;
3287
3288 default:
3289 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3290 return -EINVAL;
3291 }
3292
3293 list_del(&cmd_pos->link);
3294 kfree(cmd_pos);
3295
3296 return cnt;
3297}
3298
3299/**
3300 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3301 *
3302 * @fw_hi: two most significant MAC bytes in FW format
3303 * @fw_mid: two middle MAC bytes in FW format
3304 * @fw_lo: two least significant MAC bytes in FW format
3305 * @mac: buffer to store the MAC address in network order
3306 */
3307static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3308 __le16 *fw_lo, u8 *mac)
3309{
3310 mac[1] = ((u8 *)fw_hi)[0];
3311 mac[0] = ((u8 *)fw_hi)[1];
3312 mac[3] = ((u8 *)fw_mid)[0];
3313 mac[2] = ((u8 *)fw_mid)[1];
3314 mac[5] = ((u8 *)fw_lo)[0];
3315 mac[4] = ((u8 *)fw_lo)[1];
3316}
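/* Byte-layout sketch for the swizzling above (illustrative values only):
 * for a MAC of 00:11:22:33:44:55 the FW keeps the address as three 16-bit
 * words whose in-memory bytes are
 *
 *	fw_hi: [0x11, 0x00]   fw_mid: [0x33, 0x22]   fw_lo: [0x55, 0x44]
 *
 * so each pair is byte-swapped relative to network order, and the function
 * swaps them back into mac[0..5].
 */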
3317
3318/**
3319 * bnx2x_mcast_refresh_registry_e1 - update the exact match registry
3320 *
3321 * @bp: device handle
3322 * @o: multicast object
3323 *
3324 * Check the first ramrod data entry flag to see if it was a DELETE or an ADD
3325 * command and update the registry accordingly: if ADD - allocate memory and
3326 * add the entries to the registry (list); if DELETE - clear the registry and
3327 * free the memory.
3328 */
3329static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3330 struct bnx2x_mcast_obj *o)
3331{
3332 struct bnx2x_raw_obj *raw = &o->raw;
3333 struct bnx2x_mcast_mac_elem *elem;
3334 struct mac_configuration_cmd *data =
3335 (struct mac_configuration_cmd *)(raw->rdata);
3336
3337 /* If first entry contains a SET bit - the command was ADD,
3338 * otherwise - DEL_ALL
3339 */
3340 if (GET_FLAG(data->config_table[0].flags,
3341 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3342 int i, len = data->hdr.length;
3343
3344 /* Break if it was a RESTORE command */
3345 if (!list_empty(&o->registry.exact_match.macs))
3346 return 0;
3347
3348 elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
3349 if (!elem) {
3350 BNX2X_ERR("Failed to allocate registry memory\n");
3351 return -ENOMEM;
3352 }
3353
3354 for (i = 0; i < len; i++, elem++) {
3355 bnx2x_get_fw_mac_addr(
3356 &data->config_table[i].msb_mac_addr,
3357 &data->config_table[i].middle_mac_addr,
3358 &data->config_table[i].lsb_mac_addr,
3359 elem->mac);
3360 DP(BNX2X_MSG_SP, "Adding registry entry for ["
3361 BNX2X_MAC_FMT"]\n",
3362 BNX2X_MAC_PRN_LIST(elem->mac));
3363 list_add_tail(&elem->link,
3364 &o->registry.exact_match.macs);
3365 }
3366 } else {
3367 elem = list_first_entry(&o->registry.exact_match.macs,
3368 struct bnx2x_mcast_mac_elem, link);
3369 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3370 kfree(elem);
3371 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3372 }
3373
3374 return 0;
3375}
3376
3377static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3378 struct bnx2x_mcast_ramrod_params *p,
3379 int cmd)
3380{
3381 struct bnx2x_mcast_obj *o = p->mcast_obj;
3382 struct bnx2x_raw_obj *raw = &o->raw;
3383 struct mac_configuration_cmd *data =
3384 (struct mac_configuration_cmd *)(raw->rdata);
3385 int cnt = 0, i, rc;
3386
3387 /* Reset the ramrod data buffer */
3388 memset(data, 0, sizeof(*data));
3389
3390 /* First set all entries as invalid */
3391 for (i = 0; i < o->max_cmd_len ; i++)
3392 SET_FLAG(data->config_table[i].flags,
3393 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3394 T_ETH_MAC_COMMAND_INVALIDATE);
3395
3396 /* Handle pending commands first */
3397 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3398
3399 /* If there are no more pending commands - clear SCHEDULED state */
3400 if (list_empty(&o->pending_cmds_head))
3401 o->clear_sched(o);
3402
3403 /* The below may be true iff there were no pending commands */
3404 if (!cnt)
3405 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3406
3407 /* For 57710 every command has o->max_cmd_len length to ensure that
3408 * commands are done one at a time.
3409 */
3410 o->total_pending_num -= o->max_cmd_len;
3411
3412 /* send a ramrod */
3413
3414 WARN_ON(cnt > o->max_cmd_len);
3415
3416 /* Set ramrod header (in particular, a number of entries to update) */
3417 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3418
3419	/* Update the registry: its contents must always be up to date in
3420	 * order to be able to execute a RESTORE opcode. Here we use the
3421	 * fact that 57710 sends one command at a time, hence the registry
3422	 * update may be taken out of the command handling and done in a
3423	 * simpler way here.
3424	 */
3425 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3426 if (rc)
3427 return rc;
3428
3429 /*
3430 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3431 * RAMROD_PENDING status immediately.
3432 */
3433 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3434 raw->clear_pending(raw);
3435 return 0;
3436 } else {
3437		/*
3438		 * No need for an explicit memory barrier here: the ordering of
3439		 * writing to the SPQ element vs. updating the SPQ producer is
3440		 * what matters, and since the producer update involves a memory
3441		 * read, a full memory barrier is already placed there
3442		 * (inside bnx2x_sp_post()).
3443		 */
3444
3445 /* Send a ramrod */
3446 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3447 U64_HI(raw->rdata_mapping),
3448 U64_LO(raw->rdata_mapping),
3449 ETH_CONNECTION_TYPE);
3450 if (rc)
3451 return rc;
3452
3453 /* Ramrod completion is pending */
3454 return 1;
3455 }
3456
3457}
3458
3459static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3460{
3461 return o->registry.exact_match.num_macs_set;
3462}
3463
3464static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3465{
3466 return o->registry.aprox_match.num_bins_set;
3467}
3468
3469static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3470 int n)
3471{
3472 o->registry.exact_match.num_macs_set = n;
3473}
3474
3475static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3476 int n)
3477{
3478 o->registry.aprox_match.num_bins_set = n;
3479}
3480
3481int bnx2x_config_mcast(struct bnx2x *bp,
3482 struct bnx2x_mcast_ramrod_params *p,
3483 int cmd)
3484{
3485 struct bnx2x_mcast_obj *o = p->mcast_obj;
3486 struct bnx2x_raw_obj *r = &o->raw;
3487 int rc = 0, old_reg_size;
3488
3489 /* This is needed to recover number of currently configured mcast macs
3490 * in case of failure.
3491 */
3492 old_reg_size = o->get_registry_size(o);
3493
3494 /* Do some calculations and checks */
3495 rc = o->validate(bp, p, cmd);
3496 if (rc)
3497 return rc;
3498
3499 /* Return if there is no work to do */
3500 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3501 return 0;
3502
3503 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
3504 "o->max_cmd_len=%d\n", o->total_pending_num,
3505 p->mcast_list_len, o->max_cmd_len);
3506
3507 /* Enqueue the current command to the pending list if we can't complete
3508 * it in the current iteration
3509 */
3510 if (r->check_pending(r) ||
3511 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3512 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3513 if (rc < 0)
3514 goto error_exit1;
3515
3516 /* As long as the current command is in a command list we
3517 * don't need to handle it separately.
3518 */
3519 p->mcast_list_len = 0;
3520 }
3521
3522 if (!r->check_pending(r)) {
3523
3524 /* Set 'pending' state */
3525 r->set_pending(r);
3526
3527 /* Configure the new classification in the chip */
3528 rc = o->config_mcast(bp, p, cmd);
3529 if (rc < 0)
3530 goto error_exit2;
3531
3532 /* Wait for a ramrod completion if was requested */
3533 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3534 rc = o->wait_comp(bp, o);
3535 }
3536
3537 return rc;
3538
3539error_exit2:
3540 r->clear_pending(r);
3541
3542error_exit1:
3543 o->revert(bp, p, old_reg_size);
3544
3545 return rc;
3546}
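/* A minimal caller sketch for the API above (illustration only; the exact
 * object location and flag usage depend on the caller - bp->mcast_obj is
 * assumed here):
 *
 *	struct bnx2x_mcast_ramrod_params rparam = {0};
 *	int rc;
 *
 *	rparam.mcast_obj = &bp->mcast_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);
 *
 *	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
 *	if (rc < 0)
 *		BNX2X_ERR("Failed to clear multicast config: %d\n", rc);
 */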
3547
3548static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3549{
3550 smp_mb__before_clear_bit();
3551 clear_bit(o->sched_state, o->raw.pstate);
3552 smp_mb__after_clear_bit();
3553}
3554
3555static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3556{
3557 smp_mb__before_clear_bit();
3558 set_bit(o->sched_state, o->raw.pstate);
3559 smp_mb__after_clear_bit();
3560}
3561
3562static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3563{
3564 return !!test_bit(o->sched_state, o->raw.pstate);
3565}
3566
3567static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3568{
3569 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3570}
3571
3572void bnx2x_init_mcast_obj(struct bnx2x *bp,
3573 struct bnx2x_mcast_obj *mcast_obj,
3574 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3575 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3576 int state, unsigned long *pstate, bnx2x_obj_type type)
3577{
3578 memset(mcast_obj, 0, sizeof(*mcast_obj));
3579
3580 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3581 rdata, rdata_mapping, state, pstate, type);
3582
3583 mcast_obj->engine_id = engine_id;
3584
3585 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3586
3587 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3588 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3589 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3590 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3591
3592 if (CHIP_IS_E1(bp)) {
3593 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3594 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3595 mcast_obj->hdl_restore =
3596 bnx2x_mcast_handle_restore_cmd_e1;
3597 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3598
3599 if (CHIP_REV_IS_SLOW(bp))
3600 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3601 else
3602 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3603
3604 mcast_obj->wait_comp = bnx2x_mcast_wait;
3605 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3606 mcast_obj->validate = bnx2x_mcast_validate_e1;
3607 mcast_obj->revert = bnx2x_mcast_revert_e1;
3608 mcast_obj->get_registry_size =
3609 bnx2x_mcast_get_registry_size_exact;
3610 mcast_obj->set_registry_size =
3611 bnx2x_mcast_set_registry_size_exact;
3612
3613 /* 57710 is the only chip that uses the exact match for mcast
3614 * at the moment.
3615 */
3616 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3617
3618 } else if (CHIP_IS_E1H(bp)) {
3619 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3620 mcast_obj->enqueue_cmd = NULL;
3621 mcast_obj->hdl_restore = NULL;
3622 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3623
3624 /* 57711 doesn't send a ramrod, so it has unlimited credit
3625 * for one command.
3626 */
3627 mcast_obj->max_cmd_len = -1;
3628 mcast_obj->wait_comp = bnx2x_mcast_wait;
3629 mcast_obj->set_one_rule = NULL;
3630 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3631 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3632 mcast_obj->get_registry_size =
3633 bnx2x_mcast_get_registry_size_aprox;
3634 mcast_obj->set_registry_size =
3635 bnx2x_mcast_set_registry_size_aprox;
3636 } else {
3637 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3638 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3639 mcast_obj->hdl_restore =
3640 bnx2x_mcast_handle_restore_cmd_e2;
3641 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3642 /* TODO: There should be a proper HSI define for this number!!!
3643 */
3644 mcast_obj->max_cmd_len = 16;
3645 mcast_obj->wait_comp = bnx2x_mcast_wait;
3646 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3647 mcast_obj->validate = bnx2x_mcast_validate_e2;
3648 mcast_obj->revert = bnx2x_mcast_revert_e2;
3649 mcast_obj->get_registry_size =
3650 bnx2x_mcast_get_registry_size_aprox;
3651 mcast_obj->set_registry_size =
3652 bnx2x_mcast_set_registry_size_aprox;
3653 }
3654}
3655
3656/*************************** Credit handling **********************************/
3657
3658/**
3659 * __atomic_add_ifless - add if the result is less than a given value.
3660 *
3661 * @v: pointer of type atomic_t
3662 * @a: the amount to add to v...
3663 * @u: ...if (v + a) is less than u.
3664 *
3665 * returns true if (v + a) was less than u, and false otherwise.
3666 *
3667 */
3668static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3669{
3670 int c, old;
3671
3672 c = atomic_read(v);
3673 for (;;) {
3674 if (unlikely(c + a >= u))
3675 return false;
3676
3677 old = atomic_cmpxchg((v), c, c + a);
3678 if (likely(old == c))
3679 break;
3680 c = old;
3681 }
3682
3683 return true;
3684}
3685
3686/**
3687 * __atomic_dec_ifmoe - dec if the result is greater than or equal to a value.
3688 *
3689 * @v: pointer of type atomic_t
3690 * @a: the amount to dec from v...
3691 * @u: ...if (v - a) is greater than or equal to u.
3692 *
3693 * returns true if (v - a) was greater than or equal to u, and false
3694 * otherwise.
3695 */
3696static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3697{
3698 int c, old;
3699
3700 c = atomic_read(v);
3701 for (;;) {
3702 if (unlikely(c - a < u))
3703 return false;
3704
3705 old = atomic_cmpxchg((v), c, c - a);
3706 if (likely(old == c))
3707 break;
3708 c = old;
3709 }
3710
3711 return true;
3712}
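/* Both helpers above use the standard lock-free cmpxchg retry pattern:
 * read the counter, compute the guarded result, and retry if another CPU
 * changed the value in between. For illustration only, a sketch of the
 * equivalent (but slower) locked form of the decrement:
 *
 *	spin_lock(&pool_lock);
 *	if (credit - a >= u) {
 *		credit -= a;
 *		ok = true;
 *	} else
 *		ok = false;
 *	spin_unlock(&pool_lock);
 */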
3713
3714static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3715{
3716 bool rc;
3717
3718 smp_mb();
3719 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3720 smp_mb();
3721
3722 return rc;
3723}
3724
3725static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3726{
3727 bool rc;
3728
3729 smp_mb();
3730
3731	/* Don't allow a refill if credit + cnt > pool_sz */
3732 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3733
3734 smp_mb();
3735
3736 return rc;
3737}
3738
3739static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3740{
3741 int cur_credit;
3742
3743 smp_mb();
3744 cur_credit = atomic_read(&o->credit);
3745
3746 return cur_credit;
3747}
3748
3749static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3750 int cnt)
3751{
3752 return true;
3753}
3754
3755
3756static bool bnx2x_credit_pool_get_entry(
3757 struct bnx2x_credit_pool_obj *o,
3758 int *offset)
3759{
3760 int idx, vec, i;
3761
3762 *offset = -1;
3763
3764 /* Find "internal cam-offset" then add to base for this object... */
3765 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3766
3767 /* Skip the current vector if there are no free entries in it */
3768 if (!o->pool_mirror[vec])
3769 continue;
3770
3771 /* If we've got here we are going to find a free entry */
3772		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3773 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3774
3775 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3776 /* Got one!! */
3777 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3778 *offset = o->base_pool_offset + idx;
3779 return true;
3780 }
3781 }
3782
3783 return false;
3784}
3785
3786static bool bnx2x_credit_pool_put_entry(
3787 struct bnx2x_credit_pool_obj *o,
3788 int offset)
3789{
3790 if (offset < o->base_pool_offset)
3791 return false;
3792
3793 offset -= o->base_pool_offset;
3794
3795 if (offset >= o->pool_sz)
3796 return false;
3797
3798 /* Return the entry to the pool */
3799 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3800
3801 return true;
3802}
3803
3804static bool bnx2x_credit_pool_put_entry_always_true(
3805 struct bnx2x_credit_pool_obj *o,
3806 int offset)
3807{
3808 return true;
3809}
3810
3811static bool bnx2x_credit_pool_get_entry_always_true(
3812 struct bnx2x_credit_pool_obj *o,
3813 int *offset)
3814{
3815 *offset = -1;
3816 return true;
3817}
3818/**
3819 * bnx2x_init_credit_pool - initialize credit pool internals.
3820 *
3821 * @p: credit pool object
3822 * @base: Base entry in the CAM to use.
3823 * @credit: pool size.
3824 *
3825 * If base is negative no CAM entries handling will be performed.
3826 * If credit is negative pool operations will always succeed (unlimited pool).
3827 *
3828 */
3829static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3830 int base, int credit)
3831{
3832 /* Zero the object first */
3833 memset(p, 0, sizeof(*p));
3834
3835 /* Set the table to all 1s */
3836 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3837
3838 /* Init a pool as full */
3839 atomic_set(&p->credit, credit);
3840
3841	/* The total pool size */
3842 p->pool_sz = credit;
3843
3844 p->base_pool_offset = base;
3845
3846 /* Commit the change */
3847 smp_mb();
3848
3849 p->check = bnx2x_credit_pool_check;
3850
3851 /* if pool credit is negative - disable the checks */
3852 if (credit >= 0) {
3853 p->put = bnx2x_credit_pool_put;
3854 p->get = bnx2x_credit_pool_get;
3855 p->put_entry = bnx2x_credit_pool_put_entry;
3856 p->get_entry = bnx2x_credit_pool_get_entry;
3857 } else {
3858 p->put = bnx2x_credit_pool_always_true;
3859 p->get = bnx2x_credit_pool_always_true;
3860 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3861 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3862 }
3863
3864 /* If base is negative - disable entries handling */
3865 if (base < 0) {
3866 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3867 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3868 }
3869}
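/* Usage sketch for the initializer above (illustration only; the sizes are
 * hypothetical):
 *
 *	bnx2x_init_credit_pool(p, 0, 64);	- 64 entries at CAM base 0
 *	bnx2x_init_credit_pool(p, -1, 64);	- credit only, no CAM entries
 *	bnx2x_init_credit_pool(p, 0, -1);	- unlimited pool, always succeeds
 *	bnx2x_init_credit_pool(p, 0, 0);	- empty pool, blocks operations
 */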
3870
3871void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3872 struct bnx2x_credit_pool_obj *p, u8 func_id,
3873 u8 func_num)
3874{
3875/* TODO: this will be defined in consts as well... */
3876#define BNX2X_CAM_SIZE_EMUL 5
3877
3878 int cam_sz;
3879
3880 if (CHIP_IS_E1(bp)) {
3881		/* On E1, multicast MACs are also stored in the CAM... */
3882 if (!CHIP_REV_IS_SLOW(bp))
3883 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3884 else
3885 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3886
3887 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3888
3889 } else if (CHIP_IS_E1H(bp)) {
3890		/* CAM credit is equally divided between all active functions
3891		 * on the PORT.
3892		 */
3893		if (func_num > 0) {
3894 if (!CHIP_REV_IS_SLOW(bp))
3895 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3896 else
3897 cam_sz = BNX2X_CAM_SIZE_EMUL;
3898 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3899 } else {
3900 /* this should never happen! Block MAC operations. */
3901 bnx2x_init_credit_pool(p, 0, 0);
3902 }
3903
3904 } else {
3905
3906 /*
3907		 * CAM credit is equally divided between all active functions
3908		 * on the PATH.
3909		 */
3910		if (func_num > 0) {
3911 if (!CHIP_REV_IS_SLOW(bp))
3912 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3913 else
3914 cam_sz = BNX2X_CAM_SIZE_EMUL;
3915
3916 /*
3917 * No need for CAM entries handling for 57712 and
3918 * newer.
3919 */
3920 bnx2x_init_credit_pool(p, -1, cam_sz);
3921 } else {
3922 /* this should never happen! Block MAC operations. */
3923 bnx2x_init_credit_pool(p, 0, 0);
3924 }
3925
3926 }
3927}
3928
3929void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3930 struct bnx2x_credit_pool_obj *p,
3931 u8 func_id,
3932 u8 func_num)
3933{
3934 if (CHIP_IS_E1x(bp)) {
3935 /*
3936		 * There is no VLAN credit in HW on 57710 and 57711; only
3937		 * MAC / MAC-VLAN pairs can be set.
3938 */
3939 bnx2x_init_credit_pool(p, 0, -1);
3940 } else {
3941 /*
3942		 * CAM credit is equally divided between all active functions
3943 * on the PATH.
3944 */
3945 if (func_num > 0) {
3946 int credit = MAX_VLAN_CREDIT_E2 / func_num;
3947 bnx2x_init_credit_pool(p, func_id * credit, credit);
3948 } else
3949 /* this should never happen! Block VLAN operations. */
3950 bnx2x_init_credit_pool(p, 0, 0);
3951 }
3952}
3953
3954/****************** RSS Configuration ******************/
3955/**
3956 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3957 *
3958 * @bp: driver handle
3959 * @p: pointer to rss configuration
3960 *
3961 * Prints it when NETIF_MSG_IFUP debug level is configured.
3962 */
3963static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
3964 struct bnx2x_config_rss_params *p)
3965{
3966 int i;
3967
3968 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
3969 DP(BNX2X_MSG_SP, "0x0000: ");
3970 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
3971 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
3972
3973 /* Print 4 bytes in a line */
3974 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
3975 (((i + 1) & 0x3) == 0)) {
3976 DP_CONT(BNX2X_MSG_SP, "\n");
3977 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
3978 }
3979 }
3980
3981 DP_CONT(BNX2X_MSG_SP, "\n");
3982}
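/* Sample output of the helper above (the table values are hypothetical):
 *
 *	Setting indirection table to:
 *	0x0000: 0x00 0x01 0x02 0x03
 *	0x0004: 0x00 0x01 0x02 0x03
 *	...
 */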
3983
3984/**
3985 * bnx2x_setup_rss - configure RSS
3986 *
3987 * @bp: device handle
3988 * @p: rss configuration
3989 *
3990 * Sends an RSS UPDATE ramrod to configure it.
3991 */
3992static int bnx2x_setup_rss(struct bnx2x *bp,
3993 struct bnx2x_config_rss_params *p)
3994{
3995 struct bnx2x_rss_config_obj *o = p->rss_obj;
3996 struct bnx2x_raw_obj *r = &o->raw;
3997 struct eth_rss_update_ramrod_data *data =
3998 (struct eth_rss_update_ramrod_data *)(r->rdata);
3999 u8 rss_mode = 0;
4000 int rc;
4001
4002 memset(data, 0, sizeof(*data));
4003
4004 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4005
4006 /* Set an echo field */
4007 data->echo = (r->cid & BNX2X_SWCID_MASK) |
4008 (r->state << BNX2X_SWCID_SHIFT);
4009
4010 /* RSS mode */
4011 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4012 rss_mode = ETH_RSS_MODE_DISABLED;
4013 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4014 rss_mode = ETH_RSS_MODE_REGULAR;
4015 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
4016 rss_mode = ETH_RSS_MODE_VLAN_PRI;
4017 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
4018 rss_mode = ETH_RSS_MODE_E1HOV_PRI;
4019 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
4020 rss_mode = ETH_RSS_MODE_IP_DSCP;
4021
4022 data->rss_mode = rss_mode;
4023
4024 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4025
4026 /* RSS capabilities */
4027 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4028 data->capabilities |=
4029 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4030
4031 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4032 data->capabilities |=
4033 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4034
4035 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4036 data->capabilities |=
4037 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4038
4039 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4040 data->capabilities |=
4041 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4042
4043 /* Hashing mask */
4044 data->rss_result_mask = p->rss_result_mask;
4045
4046 /* RSS engine ID */
4047 data->rss_engine_id = o->engine_id;
4048
4049 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4050
4051 /* Indirection table */
4052 memcpy(data->indirection_table, p->ind_table,
4053 T_ETH_INDIRECTION_TABLE_SIZE);
4054
4055 /* Remember the last configuration */
4056 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4057
4058 /* Print the indirection table */
4059 if (netif_msg_ifup(bp))
4060 bnx2x_debug_print_ind_table(bp, p);
4061
4062 /* RSS keys */
4063 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4064 memcpy(&data->rss_key[0], &p->rss_key[0],
4065 sizeof(data->rss_key));
4066 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4067 }
4068
4069	/*
4070	 * No need for an explicit memory barrier here: the ordering of
4071	 * writing to the SPQ element vs. updating the SPQ producer is
4072	 * what matters, and since the producer update involves a memory
4073	 * read, a full memory barrier is already placed there
4074	 * (inside bnx2x_sp_post()).
4075	 */
4076
4077 /* Send a ramrod */
4078 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4079 U64_HI(r->rdata_mapping),
4080 U64_LO(r->rdata_mapping),
4081 ETH_CONNECTION_TYPE);
4082
4083 if (rc < 0)
4084 return rc;
4085
4086 return 1;
4087}
4088
4089void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4090 u8 *ind_table)
4091{
4092 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4093}
4094
4095int bnx2x_config_rss(struct bnx2x *bp,
4096 struct bnx2x_config_rss_params *p)
4097{
4098 int rc;
4099 struct bnx2x_rss_config_obj *o = p->rss_obj;
4100 struct bnx2x_raw_obj *r = &o->raw;
4101
4102 /* Do nothing if only driver cleanup was requested */
4103 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4104 return 0;
4105
4106 r->set_pending(r);
4107
4108 rc = o->config_rss(bp, p);
4109 if (rc < 0) {
4110 r->clear_pending(r);
4111 return rc;
4112 }
4113
4114 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4115 rc = r->wait_comp(bp, r);
4116
4117 return rc;
4118}
4119
4120
4121void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4122 struct bnx2x_rss_config_obj *rss_obj,
4123 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4124 void *rdata, dma_addr_t rdata_mapping,
4125 int state, unsigned long *pstate,
4126 bnx2x_obj_type type)
4127{
4128 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4129 rdata_mapping, state, pstate, type);
4130
4131 rss_obj->engine_id = engine_id;
4132 rss_obj->config_rss = bnx2x_setup_rss;
4133}
4134
4135/********************** Queue state object ***********************************/
4136
4137/**
4138 * bnx2x_queue_state_change - perform Queue state change transition
4139 *
4140 * @bp: device handle
4141 * @params: parameters to perform the transition
4142 *
4143 * returns 0 in case of a successfully completed transition, a negative error
4144 * code in case of failure, or a positive (EBUSY) value if a completion
4145 * is still pending (possible only if RAMROD_COMP_WAIT is
4146 * not set in params->ramrod_flags, i.e. for asynchronous commands).
4147 *
4148 */
4149int bnx2x_queue_state_change(struct bnx2x *bp,
4150 struct bnx2x_queue_state_params *params)
4151{
4152 struct bnx2x_queue_sp_obj *o = params->q_obj;
4153 int rc, pending_bit;
4154 unsigned long *pending = &o->pending;
4155
4156 /* Check that the requested transition is legal */
4157 if (o->check_transition(bp, o, params))
4158 return -EINVAL;
4159
4160 /* Set "pending" bit */
4161 pending_bit = o->set_pending(o, params);
4162
4163 /* Don't send a command if only driver cleanup was requested */
4164 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4165 o->complete_cmd(bp, o, pending_bit);
4166 else {
4167 /* Send a ramrod */
4168 rc = o->send_cmd(bp, params);
4169 if (rc) {
4170 o->next_state = BNX2X_Q_STATE_MAX;
4171 clear_bit(pending_bit, pending);
4172 smp_mb__after_clear_bit();
4173 return rc;
4174 }
4175
4176 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4177 rc = o->wait_comp(bp, o, pending_bit);
4178 if (rc)
4179 return rc;
4180
4181 return 0;
4182 }
4183 }
4184
4185 return !!test_bit(pending_bit, pending);
4186}
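/* A minimal caller sketch for the state machine above (illustration only;
 * the queue object location and the chosen command are assumptions of the
 * example):
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *	int rc;
 *
 *	q_params.q_obj = &fp->q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_EMPTY;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 *	- returns 0 once the transition has completed, negative on error
 */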
4187
4188
4189static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4190 struct bnx2x_queue_state_params *params)
4191{
4192 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4193
4194 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4195 * UPDATE command.
4196 */
4197 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4198 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4199 bit = BNX2X_Q_CMD_UPDATE;
4200 else
4201 bit = cmd;
4202
4203 set_bit(bit, &obj->pending);
4204 return bit;
4205}
4206
4207static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4208 struct bnx2x_queue_sp_obj *o,
4209 enum bnx2x_queue_cmd cmd)
4210{
4211 return bnx2x_state_wait(bp, cmd, &o->pending);
4212}
4213
4214/**
4215 * bnx2x_queue_comp_cmd - complete the state change command.
4216 *
4217 * @bp: device handle
4218 * @o: queue state object
4219 * @cmd: command to complete
4220 *
4221 * Checks that the arrived completion is expected.
4222 */
4223static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4224 struct bnx2x_queue_sp_obj *o,
4225 enum bnx2x_queue_cmd cmd)
4226{
4227 unsigned long cur_pending = o->pending;
4228
4229 if (!test_and_clear_bit(cmd, &cur_pending)) {
4230 BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
4231 "pending 0x%lx, next_state %d\n", cmd,
4232 o->cids[BNX2X_PRIMARY_CID_INDEX],
4233 o->state, cur_pending, o->next_state);
4234 return -EINVAL;
4235 }
4236
4237 if (o->next_tx_only >= o->max_cos)
4238		/* >= because the tx-only count must always be smaller than
4239		 * max_cos since the primary connection supports COS 0
4240		 */
4241		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d\n",
4242 o->next_tx_only, o->max_cos);
4243
4244 DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
4245 "setting state to %d\n", cmd,
4246 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4247
4248 if (o->next_tx_only) /* print num tx-only if any exist */
4249		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4250 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4251
4252 o->state = o->next_state;
4253 o->num_tx_only = o->next_tx_only;
4254 o->next_state = BNX2X_Q_STATE_MAX;
4255
4256 /* It's important that o->state and o->next_state are
4257 * updated before o->pending.
4258 */
4259 wmb();
4260
4261 clear_bit(cmd, &o->pending);
4262 smp_mb__after_clear_bit();
4263
4264 return 0;
4265}
4266
4267static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4268 struct bnx2x_queue_state_params *cmd_params,
4269 struct client_init_ramrod_data *data)
4270{
4271 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4272
4273 /* Rx data */
4274
4275 /* IPv6 TPA supported for E2 and above only */
4276 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4277 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4278}
4279
4280static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4281 struct bnx2x_queue_sp_obj *o,
4282 struct bnx2x_general_setup_params *params,
4283 struct client_init_general_data *gen_data,
4284 unsigned long *flags)
4285{
4286 gen_data->client_id = o->cl_id;
4287
4288 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4289 gen_data->statistics_counter_id =
4290 params->stat_id;
4291 gen_data->statistics_en_flg = 1;
4292 gen_data->statistics_zero_flg =
4293 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4294 } else
4295 gen_data->statistics_counter_id =
4296 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4297
4298 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4299 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4300 gen_data->sp_client_id = params->spcl_id;
4301 gen_data->mtu = cpu_to_le16(params->mtu);
4302 gen_data->func_id = o->func_id;
4303
4304
4305 gen_data->cos = params->cos;
4306
4307 gen_data->traffic_type =
4308 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4309 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4310
4311	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4312 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4313}
4314
4315static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4316 struct bnx2x_txq_setup_params *params,
4317 struct client_init_tx_data *tx_data,
4318 unsigned long *flags)
4319{
4320 tx_data->enforce_security_flg =
4321 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4322 tx_data->default_vlan =
4323 cpu_to_le16(params->default_vlan);
4324 tx_data->default_vlan_flg =
4325 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4326 tx_data->tx_switching_flg =
4327 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4328 tx_data->anti_spoofing_flg =
4329 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4330 tx_data->tx_status_block_id = params->fw_sb_id;
4331 tx_data->tx_sb_index_number = params->sb_cq_index;
4332 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4333
4334 tx_data->tx_bd_page_base.lo =
4335 cpu_to_le32(U64_LO(params->dscr_map));
4336 tx_data->tx_bd_page_base.hi =
4337 cpu_to_le32(U64_HI(params->dscr_map));
4338
4339 /* Don't configure any Tx switching mode during queue SETUP */
4340 tx_data->state = 0;
4341}
4342
4343static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4344 struct rxq_pause_params *params,
4345 struct client_init_rx_data *rx_data)
4346{
4347 /* flow control data */
4348 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4349 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4350 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4351 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4352 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4353 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4354 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4355}
4356
4357static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4358 struct bnx2x_rxq_setup_params *params,
4359 struct client_init_rx_data *rx_data,
4360 unsigned long *flags)
4361{
4362 /* Rx data */
4363 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4364 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4365 rx_data->vmqueue_mode_en_flg = 0;
4366
4367 rx_data->cache_line_alignment_log_size =
4368 params->cache_line_log;
4369 rx_data->enable_dynamic_hc =
4370 test_bit(BNX2X_Q_FLG_DHC, flags);
4371 rx_data->max_sges_for_packet = params->max_sges_pkt;
4372 rx_data->client_qzone_id = params->cl_qzone_id;
4373 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4374
4375 /* Always start in DROP_ALL mode */
4376 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4377 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4378
4379 /* We don't set drop flags */
4380 rx_data->drop_ip_cs_err_flg = 0;
4381 rx_data->drop_tcp_cs_err_flg = 0;
4382 rx_data->drop_ttl0_flg = 0;
4383 rx_data->drop_udp_cs_err_flg = 0;
4384 rx_data->inner_vlan_removal_enable_flg =
4385 test_bit(BNX2X_Q_FLG_VLAN, flags);
4386 rx_data->outer_vlan_removal_enable_flg =
4387 test_bit(BNX2X_Q_FLG_OV, flags);
4388 rx_data->status_block_id = params->fw_sb_id;
4389 rx_data->rx_sb_index_number = params->sb_cq_index;
4390 rx_data->max_tpa_queues = params->max_tpa_queues;
4391 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4392 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4393 rx_data->bd_page_base.lo =
4394 cpu_to_le32(U64_LO(params->dscr_map));
4395 rx_data->bd_page_base.hi =
4396 cpu_to_le32(U64_HI(params->dscr_map));
4397 rx_data->sge_page_base.lo =
4398 cpu_to_le32(U64_LO(params->sge_map));
4399 rx_data->sge_page_base.hi =
4400 cpu_to_le32(U64_HI(params->sge_map));
4401 rx_data->cqe_page_base.lo =
4402 cpu_to_le32(U64_LO(params->rcq_map));
4403 rx_data->cqe_page_base.hi =
4404 cpu_to_le32(U64_HI(params->rcq_map));
4405 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4406
4407 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4408 rx_data->approx_mcast_engine_id = o->func_id;
4409 rx_data->is_approx_mcast = 1;
4410 }
4411
4412 rx_data->rss_engine_id = params->rss_engine_id;
4413
4414 /* silent vlan removal */
4415 rx_data->silent_vlan_removal_flg =
4416 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4417 rx_data->silent_vlan_value =
4418 cpu_to_le16(params->silent_removal_value);
4419 rx_data->silent_vlan_mask =
4420 cpu_to_le16(params->silent_removal_mask);
4421
4422}
4423
4424/* initialize the general, tx and rx parts of a queue object */
4425static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4426 struct bnx2x_queue_state_params *cmd_params,
4427 struct client_init_ramrod_data *data)
4428{
4429 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4430 &cmd_params->params.setup.gen_params,
4431 &data->general,
4432 &cmd_params->params.setup.flags);
4433
4434 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4435 &cmd_params->params.setup.txq_params,
4436 &data->tx,
4437 &cmd_params->params.setup.flags);
4438
4439 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4440 &cmd_params->params.setup.rxq_params,
4441 &data->rx,
4442 &cmd_params->params.setup.flags);
4443
4444 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4445 &cmd_params->params.setup.pause_params,
4446 &data->rx);
4447}
4448
4449/* initialize the general and tx parts of a tx-only queue object */
4450static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4451 struct bnx2x_queue_state_params *cmd_params,
4452 struct tx_queue_init_ramrod_data *data)
4453{
4454 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4455 &cmd_params->params.tx_only.gen_params,
4456 &data->general,
4457 &cmd_params->params.tx_only.flags);
4458
4459 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4460 &cmd_params->params.tx_only.txq_params,
4461 &data->tx,
4462 &cmd_params->params.tx_only.flags);
4463
4464	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n", cmd_params->q_obj->cids[0],
4465 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
4466}
4467
4468/**
4469 * bnx2x_q_init - init HW/FW queue
4470 *
4471 * @bp: device handle
4472 * @params: queue state parameters
4473 *
4474 * HW/FW initial Queue configuration:
4475 * - HC: Rx and Tx
4476 * - CDU context validation
4477 *
4478 */
4479static inline int bnx2x_q_init(struct bnx2x *bp,
4480 struct bnx2x_queue_state_params *params)
4481{
4482 struct bnx2x_queue_sp_obj *o = params->q_obj;
4483 struct bnx2x_queue_init_params *init = &params->params.init;
4484 u16 hc_usec;
4485 u8 cos;
4486
4487 /* Tx HC configuration */
4488 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4489 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4490 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4491
4492 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4493 init->tx.sb_cq_index,
4494 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4495 hc_usec);
4496 }
4497
4498 /* Rx HC configuration */
4499 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4500 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4501 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4502
4503 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4504 init->rx.sb_cq_index,
4505 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4506 hc_usec);
4507 }
4508
4509 /* Set CDU context validation values */
4510 for (cos = 0; cos < o->max_cos; cos++) {
4511 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d",
4512 o->cids[cos], cos);
4513 DP(BNX2X_MSG_SP, "context pointer %p", init->cxts[cos]);
4514 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4515 }
4516
4517 /* As no ramrod is sent, complete the command immediately */
4518 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4519
4520 mmiowb();
4521 smp_mb();
4522
4523 return 0;
4524}
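/*
 * A minimal caller sketch (an assumption for illustration, not code from
 * this patch): driving the INIT command above through
 * bnx2x_queue_state_change(). Note the coalescing math: a hc_rate of
 * 50000 interrupts/sec yields hc_usec = 1000000 / 50000 = 20 usec.
 */
static int example_queue_hw_init(struct bnx2x *bp,
				 struct bnx2x_queue_sp_obj *q_obj,
				 struct eth_context *cxt,
				 u8 fw_sb_id, u8 sb_cq_index)
{
	struct bnx2x_queue_state_params q_params = {NULL};
	struct bnx2x_queue_init_params *init = &q_params.params.init;

	q_params.q_obj = q_obj;
	q_params.cmd = BNX2X_Q_CMD_INIT;

	/* Hypothetical Tx coalescing setup: 50000 ints/sec -> 20 usec */
	init->tx.hc_rate = 50000;
	init->tx.fw_sb_id = fw_sb_id;
	init->tx.sb_cq_index = sb_cq_index;
	__set_bit(BNX2X_Q_FLG_HC, &init->tx.flags);
	__set_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags);

	/* CDU context for the single (primary) CoS */
	init->cxts[0] = cxt;
	init->max_cos = 1;

	return bnx2x_queue_state_change(bp, &q_params);
}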
4525
4526static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4527 struct bnx2x_queue_state_params *params)
4528{
4529 struct bnx2x_queue_sp_obj *o = params->q_obj;
4530 struct client_init_ramrod_data *rdata =
4531 (struct client_init_ramrod_data *)o->rdata;
4532 dma_addr_t data_mapping = o->rdata_mapping;
4533 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4534
4535 /* Clear the ramrod data */
4536 memset(rdata, 0, sizeof(*rdata));
4537
4538 /* Fill the ramrod data */
4539 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4540
4541 /*
4542	 * No need for an explicit memory barrier here: the ordering
4543	 * between writing the SPQ element and updating the SPQ
4544	 * producer (which involves a memory read) is enforced by the
4545	 * full memory barrier inside bnx2x_sp_post(), so nothing more
4546	 * is needed at this point.
4547 */
4548
4549 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4550 U64_HI(data_mapping),
4551 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4552}
4553
4554static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4555 struct bnx2x_queue_state_params *params)
4556{
4557 struct bnx2x_queue_sp_obj *o = params->q_obj;
4558 struct client_init_ramrod_data *rdata =
4559 (struct client_init_ramrod_data *)o->rdata;
4560 dma_addr_t data_mapping = o->rdata_mapping;
4561 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4562
4563 /* Clear the ramrod data */
4564 memset(rdata, 0, sizeof(*rdata));
4565
4566 /* Fill the ramrod data */
4567 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4568 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4569
4570 /*
4571	 * No need for an explicit memory barrier here: the ordering
4572	 * between writing the SPQ element and updating the SPQ
4573	 * producer (which involves a memory read) is enforced by the
4574	 * full memory barrier inside bnx2x_sp_post(), so nothing more
4575	 * is needed at this point.
4576 */
4577
4578 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4579 U64_HI(data_mapping),
4580 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4581}
4582
4583static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4584 struct bnx2x_queue_state_params *params)
4585{
4586 struct bnx2x_queue_sp_obj *o = params->q_obj;
4587 struct tx_queue_init_ramrod_data *rdata =
4588 (struct tx_queue_init_ramrod_data *)o->rdata;
4589 dma_addr_t data_mapping = o->rdata_mapping;
4590 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4591 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4592 &params->params.tx_only;
4593 u8 cid_index = tx_only_params->cid_index;
4594
4595
4596 if (cid_index >= o->max_cos) {
4597 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4598 o->cl_id, cid_index);
4599 return -EINVAL;
4600 }
4601
4602 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d",
4603 tx_only_params->gen_params.cos,
4604 tx_only_params->gen_params.spcl_id);
4605
4606 /* Clear the ramrod data */
4607 memset(rdata, 0, sizeof(*rdata));
4608
4609 /* Fill the ramrod data */
4610 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4611
4612	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, "
4613 "sp-client id %d, cos %d",
4614 o->cids[cid_index],
4615 rdata->general.client_id,
4616 rdata->general.sp_client_id, rdata->general.cos);
4617
4618 /*
4619	 * No need for an explicit memory barrier here: the ordering
4620	 * between writing the SPQ element and updating the SPQ
4621	 * producer (which involves a memory read) is enforced by the
4622	 * full memory barrier inside bnx2x_sp_post(), so nothing more
4623	 * is needed at this point.
4624 */
4625
4626 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4627 U64_HI(data_mapping),
4628 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4629}
4630
4631static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4632 struct bnx2x_queue_sp_obj *obj,
4633 struct bnx2x_queue_update_params *params,
4634 struct client_update_ramrod_data *data)
4635{
4636 /* Client ID of the client to update */
4637 data->client_id = obj->cl_id;
4638
4639 /* Function ID of the client to update */
4640 data->func_id = obj->func_id;
4641
4642 /* Default VLAN value */
4643 data->default_vlan = cpu_to_le16(params->def_vlan);
4644
4645 /* Inner VLAN stripping */
4646 data->inner_vlan_removal_enable_flg =
4647 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4648 data->inner_vlan_removal_change_flg =
4649 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4650 &params->update_flags);
4651
4652	/* Outer VLAN stripping */
4653 data->outer_vlan_removal_enable_flg =
4654 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4655 data->outer_vlan_removal_change_flg =
4656 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4657 &params->update_flags);
4658
4659	/* Drop packets whose source MAC does not belong to this
4660 * Queue.
4661 */
4662 data->anti_spoofing_enable_flg =
4663 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4664 data->anti_spoofing_change_flg =
4665 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4666
4667 /* Activate/Deactivate */
4668 data->activate_flg =
4669 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4670 data->activate_change_flg =
4671 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4672
4673 /* Enable default VLAN */
4674 data->default_vlan_enable_flg =
4675 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4676 data->default_vlan_change_flg =
4677 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4678 &params->update_flags);
4679
4680 /* silent vlan removal */
4681 data->silent_vlan_change_flg =
4682 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4683 &params->update_flags);
4684 data->silent_vlan_removal_flg =
4685 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4686 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4687 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4688}
4689
4690static inline int bnx2x_q_send_update(struct bnx2x *bp,
4691 struct bnx2x_queue_state_params *params)
4692{
4693 struct bnx2x_queue_sp_obj *o = params->q_obj;
4694 struct client_update_ramrod_data *rdata =
4695 (struct client_update_ramrod_data *)o->rdata;
4696 dma_addr_t data_mapping = o->rdata_mapping;
4697 struct bnx2x_queue_update_params *update_params =
4698 &params->params.update;
4699 u8 cid_index = update_params->cid_index;
4700
4701 if (cid_index >= o->max_cos) {
4702 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4703 o->cl_id, cid_index);
4704 return -EINVAL;
4705 }
4706
4707
4708 /* Clear the ramrod data */
4709 memset(rdata, 0, sizeof(*rdata));
4710
4711 /* Fill the ramrod data */
4712 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4713
4714 /*
4715	 * No need for an explicit memory barrier here: the ordering
4716	 * between writing the SPQ element and updating the SPQ
4717	 * producer (which involves a memory read) is enforced by the
4718	 * full memory barrier inside bnx2x_sp_post(), so nothing more
4719	 * is needed at this point.
4720 */
4721
4722 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4723 o->cids[cid_index], U64_HI(data_mapping),
4724 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4725}
4726
4727/**
4728 * bnx2x_q_send_deactivate - send DEACTIVATE command
4729 *
4730 * @bp: device handle
4731 * @params:
4732 *
4733 * implemented using the UPDATE command.
4734 */
4735static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4736 struct bnx2x_queue_state_params *params)
4737{
4738 struct bnx2x_queue_update_params *update = &params->params.update;
4739
4740 memset(update, 0, sizeof(*update));
4741
4742 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4743
4744 return bnx2x_q_send_update(bp, params);
4745}
4746
4747/**
4748 * bnx2x_q_send_activate - send ACTIVATE command
4749 *
4750 * @bp: device handle
4751 * @params:
4752 *
4753 * implemented using the UPDATE command.
4754 */
4755static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4756 struct bnx2x_queue_state_params *params)
4757{
4758 struct bnx2x_queue_update_params *update = &params->params.update;
4759
4760 memset(update, 0, sizeof(*update));
4761
4762 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4763 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4764
4765 return bnx2x_q_send_update(bp, params);
4766}
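/*
 * A minimal usage sketch (assumed caller, not part of this patch):
 * activating a queue through the generic state-change entry point and
 * blocking until the underlying UPDATE ramrod completes.
 */
static int example_queue_activate(struct bnx2x *bp,
				  struct bnx2x_queue_sp_obj *q_obj)
{
	struct bnx2x_queue_state_params q_params = {NULL};

	q_params.q_obj = q_obj;
	q_params.cmd = BNX2X_Q_CMD_ACTIVATE;

	/* Sleep until the ramrod completion is signalled */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	return bnx2x_queue_state_change(bp, &q_params);
}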
4767
4768static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4769 struct bnx2x_queue_state_params *params)
4770{
4771 /* TODO: Not implemented yet. */
4772 return -1;
4773}
4774
4775static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4776 struct bnx2x_queue_state_params *params)
4777{
4778 struct bnx2x_queue_sp_obj *o = params->q_obj;
4779
4780 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4781 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4782 ETH_CONNECTION_TYPE);
4783}
4784
4785static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4786 struct bnx2x_queue_state_params *params)
4787{
4788 struct bnx2x_queue_sp_obj *o = params->q_obj;
4789 u8 cid_idx = params->params.cfc_del.cid_index;
4790
4791 if (cid_idx >= o->max_cos) {
4792 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4793 o->cl_id, cid_idx);
4794 return -EINVAL;
4795 }
4796
4797 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4798 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4799}
4800
4801static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4802 struct bnx2x_queue_state_params *params)
4803{
4804 struct bnx2x_queue_sp_obj *o = params->q_obj;
4805 u8 cid_index = params->params.terminate.cid_index;
4806
4807 if (cid_index >= o->max_cos) {
4808 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4809 o->cl_id, cid_index);
4810 return -EINVAL;
4811 }
4812
4813 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4814 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4815}
4816
4817static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4818 struct bnx2x_queue_state_params *params)
4819{
4820 struct bnx2x_queue_sp_obj *o = params->q_obj;
4821
4822 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4823 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4824 ETH_CONNECTION_TYPE);
4825}
4826
4827static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4828 struct bnx2x_queue_state_params *params)
4829{
4830 switch (params->cmd) {
4831 case BNX2X_Q_CMD_INIT:
4832 return bnx2x_q_init(bp, params);
4833 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4834 return bnx2x_q_send_setup_tx_only(bp, params);
4835 case BNX2X_Q_CMD_DEACTIVATE:
4836 return bnx2x_q_send_deactivate(bp, params);
4837 case BNX2X_Q_CMD_ACTIVATE:
4838 return bnx2x_q_send_activate(bp, params);
4839 case BNX2X_Q_CMD_UPDATE:
4840 return bnx2x_q_send_update(bp, params);
4841 case BNX2X_Q_CMD_UPDATE_TPA:
4842 return bnx2x_q_send_update_tpa(bp, params);
4843 case BNX2X_Q_CMD_HALT:
4844 return bnx2x_q_send_halt(bp, params);
4845 case BNX2X_Q_CMD_CFC_DEL:
4846 return bnx2x_q_send_cfc_del(bp, params);
4847 case BNX2X_Q_CMD_TERMINATE:
4848 return bnx2x_q_send_terminate(bp, params);
4849 case BNX2X_Q_CMD_EMPTY:
4850 return bnx2x_q_send_empty(bp, params);
4851 default:
4852 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4853 return -EINVAL;
4854 }
4855}
4856
4857static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4858 struct bnx2x_queue_state_params *params)
4859{
4860 switch (params->cmd) {
4861 case BNX2X_Q_CMD_SETUP:
4862 return bnx2x_q_send_setup_e1x(bp, params);
4863 case BNX2X_Q_CMD_INIT:
4864 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4865 case BNX2X_Q_CMD_DEACTIVATE:
4866 case BNX2X_Q_CMD_ACTIVATE:
4867 case BNX2X_Q_CMD_UPDATE:
4868 case BNX2X_Q_CMD_UPDATE_TPA:
4869 case BNX2X_Q_CMD_HALT:
4870 case BNX2X_Q_CMD_CFC_DEL:
4871 case BNX2X_Q_CMD_TERMINATE:
4872 case BNX2X_Q_CMD_EMPTY:
4873 return bnx2x_queue_send_cmd_cmn(bp, params);
4874 default:
4875 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4876 return -EINVAL;
4877 }
4878}
4879
4880static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4881 struct bnx2x_queue_state_params *params)
4882{
4883 switch (params->cmd) {
4884 case BNX2X_Q_CMD_SETUP:
4885 return bnx2x_q_send_setup_e2(bp, params);
4886 case BNX2X_Q_CMD_INIT:
4887 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4888 case BNX2X_Q_CMD_DEACTIVATE:
4889 case BNX2X_Q_CMD_ACTIVATE:
4890 case BNX2X_Q_CMD_UPDATE:
4891 case BNX2X_Q_CMD_UPDATE_TPA:
4892 case BNX2X_Q_CMD_HALT:
4893 case BNX2X_Q_CMD_CFC_DEL:
4894 case BNX2X_Q_CMD_TERMINATE:
4895 case BNX2X_Q_CMD_EMPTY:
4896 return bnx2x_queue_send_cmd_cmn(bp, params);
4897 default:
4898 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4899 return -EINVAL;
4900 }
4901}
4902
4903/**
4904 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4905 *
4906 * @bp: device handle
4907 * @o:
4908 * @params:
4909 *
4910 * (not Forwarding)
4911	 * It both checks if the requested command is legal in the current
4912 * state and, if it's legal, sets a `next_state' in the object
4913 * that will be used in the completion flow to set the `state'
4914 * of the object.
4915 *
4916 * returns 0 if a requested command is a legal transition,
4917 * -EINVAL otherwise.
4918 */
4919static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4920 struct bnx2x_queue_sp_obj *o,
4921 struct bnx2x_queue_state_params *params)
4922{
4923 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4924 enum bnx2x_queue_cmd cmd = params->cmd;
4925 struct bnx2x_queue_update_params *update_params =
4926 &params->params.update;
4927 u8 next_tx_only = o->num_tx_only;
4928
4929 /*
4930 * Forget all pending for completion commands if a driver only state
4931 * transition has been requested.
4932 */
4933 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4934 o->pending = 0;
4935 o->next_state = BNX2X_Q_STATE_MAX;
4936 }
4937
4938 /*
4939 * Don't allow a next state transition if we are in the middle of
4940 * the previous one.
4941 */
4942 if (o->pending)
4943 return -EBUSY;
4944
4945 switch (state) {
4946 case BNX2X_Q_STATE_RESET:
4947 if (cmd == BNX2X_Q_CMD_INIT)
4948 next_state = BNX2X_Q_STATE_INITIALIZED;
4949
4950 break;
4951 case BNX2X_Q_STATE_INITIALIZED:
4952 if (cmd == BNX2X_Q_CMD_SETUP) {
4953 if (test_bit(BNX2X_Q_FLG_ACTIVE,
4954 &params->params.setup.flags))
4955 next_state = BNX2X_Q_STATE_ACTIVE;
4956 else
4957 next_state = BNX2X_Q_STATE_INACTIVE;
4958 }
4959
4960 break;
4961 case BNX2X_Q_STATE_ACTIVE:
4962 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
4963 next_state = BNX2X_Q_STATE_INACTIVE;
4964
4965 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4966 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4967 next_state = BNX2X_Q_STATE_ACTIVE;
4968
4969 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4970 next_state = BNX2X_Q_STATE_MULTI_COS;
4971 next_tx_only = 1;
4972 }
4973
4974 else if (cmd == BNX2X_Q_CMD_HALT)
4975 next_state = BNX2X_Q_STATE_STOPPED;
4976
4977 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4978 /* If "active" state change is requested, update the
4979 * state accordingly.
4980 */
4981 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4982 &update_params->update_flags) &&
4983 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4984 &update_params->update_flags))
4985 next_state = BNX2X_Q_STATE_INACTIVE;
4986 else
4987 next_state = BNX2X_Q_STATE_ACTIVE;
4988 }
4989
4990 break;
4991 case BNX2X_Q_STATE_MULTI_COS:
4992 if (cmd == BNX2X_Q_CMD_TERMINATE)
4993 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
4994
4995 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4996 next_state = BNX2X_Q_STATE_MULTI_COS;
4997 next_tx_only = o->num_tx_only + 1;
4998 }
4999
5000 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5001 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5002 next_state = BNX2X_Q_STATE_MULTI_COS;
5003
5004 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5005 /* If "active" state change is requested, update the
5006 * state accordingly.
5007 */
5008 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5009 &update_params->update_flags) &&
5010 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5011 &update_params->update_flags))
5012 next_state = BNX2X_Q_STATE_INACTIVE;
5013 else
5014 next_state = BNX2X_Q_STATE_MULTI_COS;
5015 }
5016
5017 break;
5018 case BNX2X_Q_STATE_MCOS_TERMINATED:
5019 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5020 next_tx_only = o->num_tx_only - 1;
5021 if (next_tx_only == 0)
5022 next_state = BNX2X_Q_STATE_ACTIVE;
5023 else
5024 next_state = BNX2X_Q_STATE_MULTI_COS;
5025 }
5026
5027 break;
5028 case BNX2X_Q_STATE_INACTIVE:
5029 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5030 next_state = BNX2X_Q_STATE_ACTIVE;
5031
5032 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5033 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5034 next_state = BNX2X_Q_STATE_INACTIVE;
5035
5036 else if (cmd == BNX2X_Q_CMD_HALT)
5037 next_state = BNX2X_Q_STATE_STOPPED;
5038
5039 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5040 /* If "active" state change is requested, update the
5041 * state accordingly.
5042 */
5043 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5044 &update_params->update_flags) &&
5045 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5046	 &update_params->update_flags)) {
5047 if (o->num_tx_only == 0)
5048 next_state = BNX2X_Q_STATE_ACTIVE;
5049 else /* tx only queues exist for this queue */
5050 next_state = BNX2X_Q_STATE_MULTI_COS;
5051 } else
5052 next_state = BNX2X_Q_STATE_INACTIVE;
5053 }
5054
5055 break;
5056 case BNX2X_Q_STATE_STOPPED:
5057 if (cmd == BNX2X_Q_CMD_TERMINATE)
5058 next_state = BNX2X_Q_STATE_TERMINATED;
5059
5060 break;
5061 case BNX2X_Q_STATE_TERMINATED:
5062 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5063 next_state = BNX2X_Q_STATE_RESET;
5064
5065 break;
5066 default:
5067 BNX2X_ERR("Illegal state: %d\n", state);
5068 }
5069
5070 /* Transition is assured */
5071 if (next_state != BNX2X_Q_STATE_MAX) {
5072 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5073 state, cmd, next_state);
5074 o->next_state = next_state;
5075 o->next_tx_only = next_tx_only;
5076 return 0;
5077 }
5078
5079 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5080
5081 return -EINVAL;
5082}
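/*
 * For reference, the legal transitions validated above (derived from
 * the switch statement; UPDATE/EMPTY/UPDATE_TPA self-loops omitted):
 *
 *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE | INACTIVE
 *   ACTIVE <--DEACTIVATE/ACTIVATE--> INACTIVE
 *   ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
 *   MCOS_TERMINATED --CFC_DEL--> MULTI_COS | ACTIVE (last tx-only gone)
 *   ACTIVE | INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *   TERMINATED --CFC_DEL--> RESET
 */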
5083
5084void bnx2x_init_queue_obj(struct bnx2x *bp,
5085 struct bnx2x_queue_sp_obj *obj,
5086 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5087 void *rdata,
5088 dma_addr_t rdata_mapping, unsigned long type)
5089{
5090 memset(obj, 0, sizeof(*obj));
5091
5092 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5093 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5094
5095 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5096 obj->max_cos = cid_cnt;
5097 obj->cl_id = cl_id;
5098 obj->func_id = func_id;
5099 obj->rdata = rdata;
5100 obj->rdata_mapping = rdata_mapping;
5101 obj->type = type;
5102 obj->next_state = BNX2X_Q_STATE_MAX;
5103
5104 if (CHIP_IS_E1x(bp))
5105 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5106 else
5107 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5108
5109 obj->check_transition = bnx2x_queue_chk_transition;
5110
5111 obj->complete_cmd = bnx2x_queue_comp_cmd;
5112 obj->wait_comp = bnx2x_queue_wait_comp;
5113 obj->set_pending = bnx2x_queue_set_pending;
5114}
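/*
 * A minimal initialization sketch (illustrative assumption; the fp_*
 * and sp_rdata* identifiers below are hypothetical): setting up a
 * single-CoS Rx+Tx queue object.
 */
static void example_init_queue_obj(struct bnx2x *bp,
				   struct bnx2x_queue_sp_obj *q_obj,
				   u8 fp_cl_id, u32 fp_cid, u8 func_id,
				   void *sp_rdata, dma_addr_t sp_rdata_map)
{
	unsigned long q_type = 0;
	u32 cids[1] = { fp_cid };

	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	bnx2x_init_queue_obj(bp, q_obj, fp_cl_id, cids, 1 /* cid_cnt */,
			     func_id, sp_rdata, sp_rdata_map, q_type);
}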
5115
5116void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
5117 struct bnx2x_queue_sp_obj *obj,
5118 u32 cid, u8 index)
5119{
5120 obj->cids[index] = cid;
5121}
5122
5123/********************** Function state object *********************************/
5124enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5125 struct bnx2x_func_sp_obj *o)
5126{
5127 /* in the middle of transaction - return INVALID state */
5128 if (o->pending)
5129 return BNX2X_F_STATE_MAX;
5130
5131 /*
5132	 * Ensure the order of reading of o->pending and o->state:
5133	 * o->pending should be read first.
5134 */
5135 rmb();
5136
5137 return o->state;
5138}
5139
5140static int bnx2x_func_wait_comp(struct bnx2x *bp,
5141 struct bnx2x_func_sp_obj *o,
5142 enum bnx2x_func_cmd cmd)
5143{
5144 return bnx2x_state_wait(bp, cmd, &o->pending);
5145}
5146
5147/**
5148 * bnx2x_func_state_change_comp - complete the state machine transition
5149 *
5150 * @bp: device handle
5151 * @o:
5152 * @cmd:
5153 *
5154 * Called on state change transition. Completes the state
5155 * machine transition only - no HW interaction.
5156 */
5157static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5158 struct bnx2x_func_sp_obj *o,
5159 enum bnx2x_func_cmd cmd)
5160{
5161 unsigned long cur_pending = o->pending;
5162
5163 if (!test_and_clear_bit(cmd, &cur_pending)) {
5164 BNX2X_ERR("Bad MC reply %d for func %d in state %d "
5165 "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
5166 o->state, cur_pending, o->next_state);
5167 return -EINVAL;
5168 }
5169
5170 DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
5171 "%d\n", cmd, BP_FUNC(bp), o->next_state);
5172
5173 o->state = o->next_state;
5174 o->next_state = BNX2X_F_STATE_MAX;
5175
5176 /* It's important that o->state and o->next_state are
5177 * updated before o->pending.
5178 */
5179 wmb();
5180
5181 clear_bit(cmd, &o->pending);
5182 smp_mb__after_clear_bit();
5183
5184 return 0;
5185}
5186
5187/**
5188 * bnx2x_func_comp_cmd - complete the state change command
5189 *
5190 * @bp: device handle
5191 * @o:
5192 * @cmd:
5193 *
5194 * Checks that the arrived completion is expected.
5195 */
5196static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5197 struct bnx2x_func_sp_obj *o,
5198 enum bnx2x_func_cmd cmd)
5199{
5200 /* Complete the state machine part first, check if it's a
5201 * legal completion.
5202 */
5203 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5204 return rc;
5205}
5206
5207/**
5208	 * bnx2x_func_chk_transition - check state machine of a function
5209 *
5210 * @bp: device handle
5211 * @o:
5212 * @params:
5213 *
5214	 * It both checks if the requested command is legal in the current
5215 * state and, if it's legal, sets a `next_state' in the object
5216 * that will be used in the completion flow to set the `state'
5217 * of the object.
5218 *
5219 * returns 0 if a requested command is a legal transition,
5220 * -EINVAL otherwise.
5221 */
5222static int bnx2x_func_chk_transition(struct bnx2x *bp,
5223 struct bnx2x_func_sp_obj *o,
5224 struct bnx2x_func_state_params *params)
5225{
5226 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5227 enum bnx2x_func_cmd cmd = params->cmd;
5228
5229 /*
5230 * Forget all pending for completion commands if a driver only state
5231 * transition has been requested.
5232 */
5233 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5234 o->pending = 0;
5235 o->next_state = BNX2X_F_STATE_MAX;
5236 }
5237
5238 /*
5239 * Don't allow a next state transition if we are in the middle of
5240 * the previous one.
5241 */
5242 if (o->pending)
5243 return -EBUSY;
5244
5245 switch (state) {
5246 case BNX2X_F_STATE_RESET:
5247 if (cmd == BNX2X_F_CMD_HW_INIT)
5248 next_state = BNX2X_F_STATE_INITIALIZED;
5249
5250 break;
5251 case BNX2X_F_STATE_INITIALIZED:
5252 if (cmd == BNX2X_F_CMD_START)
5253 next_state = BNX2X_F_STATE_STARTED;
5254
5255 else if (cmd == BNX2X_F_CMD_HW_RESET)
5256 next_state = BNX2X_F_STATE_RESET;
5257
5258 break;
5259 case BNX2X_F_STATE_STARTED:
5260 if (cmd == BNX2X_F_CMD_STOP)
5261 next_state = BNX2X_F_STATE_INITIALIZED;
5262 else if (cmd == BNX2X_F_CMD_TX_STOP)
5263 next_state = BNX2X_F_STATE_TX_STOPPED;
5264
5265 break;
5266 case BNX2X_F_STATE_TX_STOPPED:
5267 if (cmd == BNX2X_F_CMD_TX_START)
5268 next_state = BNX2X_F_STATE_STARTED;
5269
5270 break;
5271 default:
5272 BNX2X_ERR("Unknown state: %d\n", state);
5273 }
5274
5275 /* Transition is assured */
5276 if (next_state != BNX2X_F_STATE_MAX) {
5277 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5278 state, cmd, next_state);
5279 o->next_state = next_state;
5280 return 0;
5281 }
5282
5283 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5284 state, cmd);
5285
5286 return -EINVAL;
5287}
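/*
 * For reference, the legal function transitions validated above:
 *
 *   RESET --HW_INIT--> INITIALIZED --START--> STARTED
 *   INITIALIZED --HW_RESET--> RESET
 *   STARTED --STOP--> INITIALIZED
 *   STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
 */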
5288
5289/**
5290 * bnx2x_func_init_func - performs HW init at function stage
5291 *
5292 * @bp: device handle
5293 * @drv:
5294 *
5295 * Init HW when the current phase is
5296 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5297 * HW blocks.
5298 */
5299static inline int bnx2x_func_init_func(struct bnx2x *bp,
5300 const struct bnx2x_func_sp_drv_ops *drv)
5301{
5302 return drv->init_hw_func(bp);
5303}
5304
5305/**
5306 * bnx2x_func_init_port - performs HW init at port stage
5307 *
5308 * @bp: device handle
5309 * @drv:
5310 *
5311 * Init HW when the current phase is
5312 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5313 * FUNCTION-only HW blocks.
5314 *
5315 */
5316static inline int bnx2x_func_init_port(struct bnx2x *bp,
5317 const struct bnx2x_func_sp_drv_ops *drv)
5318{
5319 int rc = drv->init_hw_port(bp);
5320 if (rc)
5321 return rc;
5322
5323 return bnx2x_func_init_func(bp, drv);
5324}
5325
5326/**
5327 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5328 *
5329 * @bp: device handle
5330 * @drv:
5331 *
5332 * Init HW when the current phase is
5333 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5334 * PORT-only and FUNCTION-only HW blocks.
5335 */
5336static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5337 const struct bnx2x_func_sp_drv_ops *drv)
5338{
5339 int rc = drv->init_hw_cmn_chip(bp);
5340 if (rc)
5341 return rc;
5342
5343 return bnx2x_func_init_port(bp, drv);
5344}
5345
5346/**
5347 * bnx2x_func_init_cmn - performs HW init at common stage
5348 *
5349 * @bp: device handle
5350 * @drv:
5351 *
5352 * Init HW when the current phase is
5353	 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5354 * PORT-only and FUNCTION-only HW blocks.
5355 */
5356static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5357 const struct bnx2x_func_sp_drv_ops *drv)
5358{
5359 int rc = drv->init_hw_cmn(bp);
5360 if (rc)
5361 return rc;
5362
5363 return bnx2x_func_init_port(bp, drv);
5364}
5365
5366static int bnx2x_func_hw_init(struct bnx2x *bp,
5367 struct bnx2x_func_state_params *params)
5368{
5369 u32 load_code = params->params.hw_init.load_phase;
5370 struct bnx2x_func_sp_obj *o = params->f_obj;
5371 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5372 int rc = 0;
5373
5374 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5375 BP_ABS_FUNC(bp), load_code);
5376
5377 /* Prepare buffers for unzipping the FW */
5378 rc = drv->gunzip_init(bp);
5379 if (rc)
5380 return rc;
5381
5382 /* Prepare FW */
5383 rc = drv->init_fw(bp);
5384 if (rc) {
5385 BNX2X_ERR("Error loading firmware\n");
5386 goto fw_init_err;
5387 }
5388
5389	/* Handle the beginning of COMMON_XXX phases separately... */
5390 switch (load_code) {
5391 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5392 rc = bnx2x_func_init_cmn_chip(bp, drv);
5393 if (rc)
5394 goto init_hw_err;
5395
5396 break;
5397 case FW_MSG_CODE_DRV_LOAD_COMMON:
5398 rc = bnx2x_func_init_cmn(bp, drv);
5399 if (rc)
5400 goto init_hw_err;
5401
5402 break;
5403 case FW_MSG_CODE_DRV_LOAD_PORT:
5404 rc = bnx2x_func_init_port(bp, drv);
5405 if (rc)
5406 goto init_hw_err;
5407
5408 break;
5409 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5410 rc = bnx2x_func_init_func(bp, drv);
5411 if (rc)
5412 goto init_hw_err;
5413
5414 break;
5415 default:
5416 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5417 rc = -EINVAL;
5418 }
5419
5420init_hw_err:
5421 drv->release_fw(bp);
5422
5423fw_init_err:
5424 drv->gunzip_end(bp);
5425
5426	/* In case of success, complete the command immediately: no ramrods
5427 * have been sent.
5428 */
5429 if (!rc)
5430 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5431
5432 return rc;
5433}
5434
5435/**
5436 * bnx2x_func_reset_func - reset HW at function stage
5437 *
5438 * @bp: device handle
5439 * @drv:
5440 *
5441 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5442 * FUNCTION-only HW blocks.
5443 */
5444static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5445 const struct bnx2x_func_sp_drv_ops *drv)
5446{
5447 drv->reset_hw_func(bp);
5448}
5449
5450/**
5451	 * bnx2x_func_reset_port - reset HW at port stage
5452 *
5453 * @bp: device handle
5454 * @drv:
5455 *
5456 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5457 * FUNCTION-only and PORT-only HW blocks.
5458 *
5459 * !!!IMPORTANT!!!
5460 *
5461 * It's important to call reset_port before reset_func() as the last thing
5462 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5463 * makes impossible any DMAE transactions.
5464 */
5465static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5466 const struct bnx2x_func_sp_drv_ops *drv)
5467{
5468 drv->reset_hw_port(bp);
5469 bnx2x_func_reset_func(bp, drv);
5470}
5471
5472/**
5473	 * bnx2x_func_reset_cmn - reset HW at common stage
5474 *
5475 * @bp: device handle
5476 * @drv:
5477 *
5478 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5479 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5480 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5481 */
5482static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5483 const struct bnx2x_func_sp_drv_ops *drv)
5484{
5485 bnx2x_func_reset_port(bp, drv);
5486 drv->reset_hw_cmn(bp);
5487}
5488
5489
5490static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5491 struct bnx2x_func_state_params *params)
5492{
5493 u32 reset_phase = params->params.hw_reset.reset_phase;
5494 struct bnx2x_func_sp_obj *o = params->f_obj;
5495 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5496
5497 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5498 reset_phase);
5499
5500 switch (reset_phase) {
5501 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5502 bnx2x_func_reset_cmn(bp, drv);
5503 break;
5504 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5505 bnx2x_func_reset_port(bp, drv);
5506 break;
5507 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5508 bnx2x_func_reset_func(bp, drv);
5509 break;
5510 default:
5511 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5512 reset_phase);
5513 break;
5514 }
5515
5516	/* Complete the command immediately: no ramrods have been sent. */
5517 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5518
5519 return 0;
5520}
5521
5522static inline int bnx2x_func_send_start(struct bnx2x *bp,
5523 struct bnx2x_func_state_params *params)
5524{
5525 struct bnx2x_func_sp_obj *o = params->f_obj;
5526 struct function_start_data *rdata =
5527 (struct function_start_data *)o->rdata;
5528 dma_addr_t data_mapping = o->rdata_mapping;
5529 struct bnx2x_func_start_params *start_params = &params->params.start;
5530
5531 memset(rdata, 0, sizeof(*rdata));
5532
5533 /* Fill the ramrod data with provided parameters */
5534 rdata->function_mode = cpu_to_le16(start_params->mf_mode);
5535 rdata->sd_vlan_tag = start_params->sd_vlan_tag;
5536 rdata->path_id = BP_PATH(bp);
5537 rdata->network_cos_mode = start_params->network_cos_mode;
5538
5539 /*
5540	 * No need for an explicit memory barrier here: the ordering
5541	 * between writing the SPQ element and updating the SPQ
5542	 * producer (which involves a memory read) is enforced by the
5543	 * full memory barrier inside bnx2x_sp_post(), so nothing more
5544	 * is needed at this point.
5545 */
5546
5547 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5548 U64_HI(data_mapping),
5549 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5550}
5551
5552static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5553 struct bnx2x_func_state_params *params)
5554{
5555 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5556 NONE_CONNECTION_TYPE);
5557}
5558
5559static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5560 struct bnx2x_func_state_params *params)
5561{
5562 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5563 NONE_CONNECTION_TYPE);
5564}
5565static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5566 struct bnx2x_func_state_params *params)
5567{
5568 struct bnx2x_func_sp_obj *o = params->f_obj;
5569 struct flow_control_configuration *rdata =
5570 (struct flow_control_configuration *)o->rdata;
5571 dma_addr_t data_mapping = o->rdata_mapping;
5572 struct bnx2x_func_tx_start_params *tx_start_params =
5573 &params->params.tx_start;
5574 int i;
5575
5576 memset(rdata, 0, sizeof(*rdata));
5577
5578 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5579 rdata->dcb_version = tx_start_params->dcb_version;
5580 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5581
5582 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5583 rdata->traffic_type_to_priority_cos[i] =
5584 tx_start_params->traffic_type_to_priority_cos[i];
5585
5586 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5587 U64_HI(data_mapping),
5588 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5589}
5590
5591static int bnx2x_func_send_cmd(struct bnx2x *bp,
5592 struct bnx2x_func_state_params *params)
5593{
5594 switch (params->cmd) {
5595 case BNX2X_F_CMD_HW_INIT:
5596 return bnx2x_func_hw_init(bp, params);
5597 case BNX2X_F_CMD_START:
5598 return bnx2x_func_send_start(bp, params);
5599 case BNX2X_F_CMD_STOP:
5600 return bnx2x_func_send_stop(bp, params);
5601 case BNX2X_F_CMD_HW_RESET:
5602 return bnx2x_func_hw_reset(bp, params);
5603 case BNX2X_F_CMD_TX_STOP:
5604 return bnx2x_func_send_tx_stop(bp, params);
5605 case BNX2X_F_CMD_TX_START:
5606 return bnx2x_func_send_tx_start(bp, params);
5607 default:
5608 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5609 return -EINVAL;
5610 }
5611}
5612
5613void bnx2x_init_func_obj(struct bnx2x *bp,
5614 struct bnx2x_func_sp_obj *obj,
5615 void *rdata, dma_addr_t rdata_mapping,
5616 struct bnx2x_func_sp_drv_ops *drv_iface)
5617{
5618 memset(obj, 0, sizeof(*obj));
5619
5620 mutex_init(&obj->one_pending_mutex);
5621
5622 obj->rdata = rdata;
5623 obj->rdata_mapping = rdata_mapping;
5624
5625 obj->send_cmd = bnx2x_func_send_cmd;
5626 obj->check_transition = bnx2x_func_chk_transition;
5627 obj->complete_cmd = bnx2x_func_comp_cmd;
5628 obj->wait_comp = bnx2x_func_wait_comp;
5629
5630 obj->drv = drv_iface;
5631}
5632
5633/**
5634 * bnx2x_func_state_change - perform Function state change transition
5635 *
5636 * @bp: device handle
5637 * @params: parameters to perform the transaction
5638 *
5639 * returns 0 in case of successfully completed transition,
5640 * negative error code in case of failure, positive
5641 * (EBUSY) value if there is a completion to that is
5642 * still pending (possible only if RAMROD_COMP_WAIT is
5643 * not set in params->ramrod_flags for asynchronous
5644 * commands).
5645 */
5646int bnx2x_func_state_change(struct bnx2x *bp,
5647 struct bnx2x_func_state_params *params)
5648{
5649 struct bnx2x_func_sp_obj *o = params->f_obj;
5650 int rc;
5651 enum bnx2x_func_cmd cmd = params->cmd;
5652 unsigned long *pending = &o->pending;
5653
5654 mutex_lock(&o->one_pending_mutex);
5655
5656 /* Check that the requested transition is legal */
5657 if (o->check_transition(bp, o, params)) {
5658 mutex_unlock(&o->one_pending_mutex);
5659 return -EINVAL;
5660 }
5661
5662 /* Set "pending" bit */
5663 set_bit(cmd, pending);
5664
5665 /* Don't send a command if only driver cleanup was requested */
5666 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5667 bnx2x_func_state_change_comp(bp, o, cmd);
5668 mutex_unlock(&o->one_pending_mutex);
5669 } else {
5670 /* Send a ramrod */
5671 rc = o->send_cmd(bp, params);
5672
5673 mutex_unlock(&o->one_pending_mutex);
5674
5675 if (rc) {
5676 o->next_state = BNX2X_F_STATE_MAX;
5677 clear_bit(cmd, pending);
5678 smp_mb__after_clear_bit();
5679 return rc;
5680 }
5681
5682 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5683 rc = o->wait_comp(bp, o, cmd);
5684 if (rc)
5685 return rc;
5686
5687 return 0;
5688 }
5689 }
5690
5691 return !!test_bit(cmd, pending);
5692}
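/*
 * A minimal caller sketch (an assumption for illustration, mirroring how
 * a load path might use this API): performing the HW_INIT phase reported
 * by the MCP and waiting for it to complete.
 */
static int example_func_hw_init(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *f_obj,
				u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	func_params.f_obj = f_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	/* load_code is e.g. FW_MSG_CODE_DRV_LOAD_COMMON from the MCP */
	func_params.params.hw_init.load_phase = load_code;

	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	return bnx2x_func_state_change(bp, &func_params);
}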
diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h
new file mode 100644
index 00000000000..9a517c2e9f1
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_sp.h
@@ -0,0 +1,1297 @@
1/* bnx2x_sp.h: Broadcom Everest network driver.
2 *
3 * Copyright 2011 Broadcom Corporation
4 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Vladislav Zolotarov
17 *
18 */
19#ifndef BNX2X_SP_VERBS
20#define BNX2X_SP_VERBS
21
22struct bnx2x;
23struct eth_context;
24
25/* Bits representing general command's configuration */
26enum {
27 RAMROD_TX,
28 RAMROD_RX,
29 /* Wait until all pending commands complete */
30 RAMROD_COMP_WAIT,
31 /* Don't send a ramrod, only update a registry */
32 RAMROD_DRV_CLR_ONLY,
33 /* Configure HW according to the current object state */
34 RAMROD_RESTORE,
35 /* Execute the next command now */
36 RAMROD_EXEC,
37 /*
38	 * Don't add a new command; continue execution of postponed
39	 * commands. If not set, a new command will be added to the
40	 * pending commands list.
41 */
42 RAMROD_CONT,
43};
44
45typedef enum {
46 BNX2X_OBJ_TYPE_RX,
47 BNX2X_OBJ_TYPE_TX,
48 BNX2X_OBJ_TYPE_RX_TX,
49} bnx2x_obj_type;
50
51/* Filtering states */
52enum {
53 BNX2X_FILTER_MAC_PENDING,
54 BNX2X_FILTER_VLAN_PENDING,
55 BNX2X_FILTER_VLAN_MAC_PENDING,
56 BNX2X_FILTER_RX_MODE_PENDING,
57 BNX2X_FILTER_RX_MODE_SCHED,
58 BNX2X_FILTER_ISCSI_ETH_START_SCHED,
59 BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
60 BNX2X_FILTER_FCOE_ETH_START_SCHED,
61 BNX2X_FILTER_FCOE_ETH_STOP_SCHED,
62 BNX2X_FILTER_MCAST_PENDING,
63 BNX2X_FILTER_MCAST_SCHED,
64 BNX2X_FILTER_RSS_CONF_PENDING,
65};
66
67struct bnx2x_raw_obj {
68 u8 func_id;
69
70 /* Queue params */
71 u8 cl_id;
72 u32 cid;
73
74 /* Ramrod data buffer params */
75 void *rdata;
76 dma_addr_t rdata_mapping;
77
78 /* Ramrod state params */
79 int state; /* "ramrod is pending" state bit */
80 unsigned long *pstate; /* pointer to state buffer */
81
82 bnx2x_obj_type obj_type;
83
84 int (*wait_comp)(struct bnx2x *bp,
85 struct bnx2x_raw_obj *o);
86
87 bool (*check_pending)(struct bnx2x_raw_obj *o);
88 void (*clear_pending)(struct bnx2x_raw_obj *o);
89 void (*set_pending)(struct bnx2x_raw_obj *o);
90};
91
92/************************* VLAN-MAC commands related parameters ***************/
93struct bnx2x_mac_ramrod_data {
94 u8 mac[ETH_ALEN];
95};
96
97struct bnx2x_vlan_ramrod_data {
98 u16 vlan;
99};
100
101struct bnx2x_vlan_mac_ramrod_data {
102 u8 mac[ETH_ALEN];
103 u16 vlan;
104};
105
106union bnx2x_classification_ramrod_data {
107 struct bnx2x_mac_ramrod_data mac;
108 struct bnx2x_vlan_ramrod_data vlan;
109 struct bnx2x_vlan_mac_ramrod_data vlan_mac;
110};
111
112/* VLAN_MAC commands */
113enum bnx2x_vlan_mac_cmd {
114 BNX2X_VLAN_MAC_ADD,
115 BNX2X_VLAN_MAC_DEL,
116 BNX2X_VLAN_MAC_MOVE,
117};
118
119struct bnx2x_vlan_mac_data {
120 /* Requested command: BNX2X_VLAN_MAC_XX */
121 enum bnx2x_vlan_mac_cmd cmd;
122 /*
123	 * Used to contain the vlan_mac_flags bits related to this data,
124	 * taken from the ramrod parameters.
125 */
126 unsigned long vlan_mac_flags;
127
128 /* Needed for MOVE command */
129 struct bnx2x_vlan_mac_obj *target_obj;
130
131 union bnx2x_classification_ramrod_data u;
132};
133
134/*************************** Exe Queue obj ************************************/
135union bnx2x_exe_queue_cmd_data {
136 struct bnx2x_vlan_mac_data vlan_mac;
137
138 struct {
139 /* TODO */
140 } mcast;
141};
142
143struct bnx2x_exeq_elem {
144 struct list_head link;
145
146 /* Length of this element in the exe_chunk. */
147 int cmd_len;
148
149 union bnx2x_exe_queue_cmd_data cmd_data;
150};
151
152union bnx2x_qable_obj;
153
154union bnx2x_exeq_comp_elem {
155 union event_ring_elem *elem;
156};
157
158struct bnx2x_exe_queue_obj;
159
160typedef int (*exe_q_validate)(struct bnx2x *bp,
161 union bnx2x_qable_obj *o,
162 struct bnx2x_exeq_elem *elem);
163
164/**
165	 * @return positive if the entry was optimized, 0 if not, negative
166 * in case of an error.
167 */
168typedef int (*exe_q_optimize)(struct bnx2x *bp,
169 union bnx2x_qable_obj *o,
170 struct bnx2x_exeq_elem *elem);
171typedef int (*exe_q_execute)(struct bnx2x *bp,
172 union bnx2x_qable_obj *o,
173 struct list_head *exe_chunk,
174 unsigned long *ramrod_flags);
175typedef struct bnx2x_exeq_elem *
176 (*exe_q_get)(struct bnx2x_exe_queue_obj *o,
177 struct bnx2x_exeq_elem *elem);
178
179struct bnx2x_exe_queue_obj {
180 /*
181	 * Commands pending for execution.
182 */
183 struct list_head exe_queue;
184
185 /*
186	 * Commands pending for completion.
187 */
188 struct list_head pending_comp;
189
190 spinlock_t lock;
191
192 /* Maximum length of commands' list for one execution */
193 int exe_chunk_len;
194
195 union bnx2x_qable_obj *owner;
196
197 /****** Virtual functions ******/
198 /**
199 * Called before commands execution for commands that are really
200 * going to be executed (after 'optimize').
201 *
202 * Must run under exe_queue->lock
203 */
204 exe_q_validate validate;
205
206
207 /**
208 * This will try to cancel the current pending commands list
209 * considering the new command.
210 *
211 * Must run under exe_queue->lock
212 */
213 exe_q_optimize optimize;
214
215 /**
216 * Run the next commands chunk (owner specific).
217 */
218 exe_q_execute execute;
219
220 /**
221 * Return the exe_queue element containing the specific command
222 * if any. Otherwise return NULL.
223 */
224 exe_q_get get;
225};
226/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
227/*
228	 * Element in the VLAN_MAC registry list having all currently configured
229 * rules.
230 */
231struct bnx2x_vlan_mac_registry_elem {
232 struct list_head link;
233
234 /*
235 * Used to store the cam offset used for the mac/vlan/vlan-mac.
236 * Relevant for 57710 and 57711 only. VLANs and MACs share the
237 * same CAM for these chips.
238 */
239 int cam_offset;
240
241 /* Needed for DEL and RESTORE flows */
242 unsigned long vlan_mac_flags;
243
244 union bnx2x_classification_ramrod_data u;
245};
246
247/* Bits representing VLAN_MAC commands specific flags */
248enum {
249 BNX2X_UC_LIST_MAC,
250 BNX2X_ETH_MAC,
251 BNX2X_ISCSI_ETH_MAC,
252 BNX2X_NETQ_ETH_MAC,
253 BNX2X_DONT_CONSUME_CAM_CREDIT,
254 BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
255};
256
257struct bnx2x_vlan_mac_ramrod_params {
258 /* Object to run the command from */
259 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
260
261 /* General command flags: COMP_WAIT, etc. */
262 unsigned long ramrod_flags;
263
264 /* Command specific configuration request */
265 struct bnx2x_vlan_mac_data user_req;
266};
267
268struct bnx2x_vlan_mac_obj {
269 struct bnx2x_raw_obj raw;
270
271 /* Bookkeeping list: will prevent the addition of already existing
272 * entries.
273 */
274 struct list_head head;
275
276	 /* TODO: Add its initialization in the init functions */
277 struct bnx2x_exe_queue_obj exe_queue;
278
279 /* MACs credit pool */
280 struct bnx2x_credit_pool_obj *macs_pool;
281
282 /* VLANs credit pool */
283 struct bnx2x_credit_pool_obj *vlans_pool;
284
285 /* RAMROD command to be used */
286 int ramrod_cmd;
287
288 /**
289 * Checks if ADD-ramrod with the given params may be performed.
290 *
291 * @return zero if the element may be added
292 */
293
294 int (*check_add)(struct bnx2x_vlan_mac_obj *o,
295 union bnx2x_classification_ramrod_data *data);
296
297 /**
298 * Checks if DEL-ramrod with the given params may be performed.
299 *
300	 * @return the matching registry element if it may be deleted, NULL otherwise
301 */
302 struct bnx2x_vlan_mac_registry_elem *
303 (*check_del)(struct bnx2x_vlan_mac_obj *o,
304 union bnx2x_classification_ramrod_data *data);
305
306 /**
307	 * Checks if a MOVE-ramrod with the given params may be performed.
308	 *
309	 * @return true if the element may be moved
310 */
311 bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o,
312 struct bnx2x_vlan_mac_obj *dst_o,
313 union bnx2x_classification_ramrod_data *data);
314
315 /**
316 * Update the relevant credit object(s) (consume/return
317 * correspondingly).
318 */
319 bool (*get_credit)(struct bnx2x_vlan_mac_obj *o);
320 bool (*put_credit)(struct bnx2x_vlan_mac_obj *o);
321 bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset);
322 bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset);
323
324 /**
325 * Configures one rule in the ramrod data buffer.
326 */
327 void (*set_one_rule)(struct bnx2x *bp,
328 struct bnx2x_vlan_mac_obj *o,
329 struct bnx2x_exeq_elem *elem, int rule_idx,
330 int cam_offset);
331
332 /**
333 * Delete all configured elements having the given
334	 * vlan_mac_flags specification. Assumes there are no commands
335	 * pending for execution. Will schedule all currently
336 * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
337 * specification for deletion and will use the given
338 * ramrod_flags for the last DEL operation.
339 *
340 * @param bp
341 * @param o
342 * @param ramrod_flags RAMROD_XX flags
343 *
344 * @return 0 if the last operation has completed successfully
345 * and there are no more elements left, positive value
346 * if there are pending for completion commands,
347 * negative value in case of failure.
348 */
349 int (*delete_all)(struct bnx2x *bp,
350 struct bnx2x_vlan_mac_obj *o,
351 unsigned long *vlan_mac_flags,
352 unsigned long *ramrod_flags);
353
354 /**
355 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
356 * configured elements list.
357 *
358 * @param bp
359	 * @param p Command parameters (only the RAMROD_COMP_WAIT bit
360	 * in ramrod_flags is taken into account)
361	 * @param ppos a pointer to the cookie that should be given back in the
362 * next call to make function handle the next element. If
363 * *ppos is set to NULL it will restart the iterator.
364 * If returned *ppos == NULL this means that the last
365 * element has been handled.
366 *
367 * @return int
368 */
369 int (*restore)(struct bnx2x *bp,
370 struct bnx2x_vlan_mac_ramrod_params *p,
371 struct bnx2x_vlan_mac_registry_elem **ppos);
372
373 /**
374	 * Should be called on a completion arrival.
375 *
376 * @param bp
377 * @param o
378 * @param cqe Completion element we are handling
379 * @param ramrod_flags if RAMROD_CONT is set the next bulk of
380 * pending commands will be executed.
381 * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
382 * may also be set if needed.
383 *
384 * @return 0 if there are neither pending nor waiting for
385 * completion commands. Positive value if there are
386 * pending for execution or for completion commands.
387 * Negative value in case of an error (including an
388 * error in the cqe).
389 */
390 int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
391 union event_ring_elem *cqe,
392 unsigned long *ramrod_flags);
393
394 /**
395 * Wait for completion of all commands. Don't schedule new ones,
396	 * just wait. It assumes that the completion code will schedule
397	 * new commands.
398 */
399 int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
400};
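/*
 * A usage sketch (assumed caller; bnx2x_config_vlan_mac() is the dispatch
 * routine implemented in bnx2x_sp.c): adding one Ethernet MAC through this
 * object and waiting for completion.
 */
static inline int example_add_eth_mac(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *mac_obj,
				      const u8 *mac)
{
	struct bnx2x_vlan_mac_ramrod_params p = {NULL};

	p.vlan_mac_obj = mac_obj;
	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
	memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);
	__set_bit(BNX2X_ETH_MAC, &p.user_req.vlan_mac_flags);
	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}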
401
402/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
403
404	/* RX_MODE ramrod special flags: set in rx_mode_flags field in
405 * a bnx2x_rx_mode_ramrod_params.
406 */
407enum {
408 BNX2X_RX_MODE_FCOE_ETH,
409 BNX2X_RX_MODE_ISCSI_ETH,
410};
411
412enum {
413 BNX2X_ACCEPT_UNICAST,
414 BNX2X_ACCEPT_MULTICAST,
415 BNX2X_ACCEPT_ALL_UNICAST,
416 BNX2X_ACCEPT_ALL_MULTICAST,
417 BNX2X_ACCEPT_BROADCAST,
418 BNX2X_ACCEPT_UNMATCHED,
419 BNX2X_ACCEPT_ANY_VLAN
420};
421
422struct bnx2x_rx_mode_ramrod_params {
423 struct bnx2x_rx_mode_obj *rx_mode_obj;
424 unsigned long *pstate;
425 int state;
426 u8 cl_id;
427 u32 cid;
428 u8 func_id;
429 unsigned long ramrod_flags;
430 unsigned long rx_mode_flags;
431
432 /*
433	 * rdata is either a pointer to eth_filter_rules_ramrod_data (e2) or to
434 * a tstorm_eth_mac_filter_config (e1x).
435 */
436 void *rdata;
437 dma_addr_t rdata_mapping;
438
439 /* Rx mode settings */
440 unsigned long rx_accept_flags;
441
442 /* internal switching settings */
443 unsigned long tx_accept_flags;
444};
445
446struct bnx2x_rx_mode_obj {
447 int (*config_rx_mode)(struct bnx2x *bp,
448 struct bnx2x_rx_mode_ramrod_params *p);
449
450 int (*wait_comp)(struct bnx2x *bp,
451 struct bnx2x_rx_mode_ramrod_params *p);
452};
453
454/********************** Set multicast group ***********************************/
455
456struct bnx2x_mcast_list_elem {
457 struct list_head link;
458 u8 *mac;
459};
460
461union bnx2x_mcast_config_data {
462 u8 *mac;
463 u8 bin; /* used in a RESTORE flow */
464};
465
466struct bnx2x_mcast_ramrod_params {
467 struct bnx2x_mcast_obj *mcast_obj;
468
469 /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
470 unsigned long ramrod_flags;
471
472 struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */
473 /** TODO:
474 * - rename it to macs_num.
475 * - Add a new command type for handling pending commands
476 * (remove "zero semantics").
477 *
478 * Length of mcast_list. If zero and ADD_CONT command - post
479 * pending commands.
480 */
481 int mcast_list_len;
482};
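/*
 * A sketch of filling these parameters (illustrative; the caller-owned
 * 'elems'/'macs' storage is a hypothetical convention): each list element
 * simply points at a caller-provided MAC address.
 */
static inline void example_fill_mcast_params(
	struct bnx2x_mcast_ramrod_params *p,
	struct bnx2x_mcast_obj *mcast_obj,
	struct bnx2x_mcast_list_elem *elems,
	u8 (*macs)[ETH_ALEN], int n)
{
	int i;

	p->mcast_obj = mcast_obj;
	__set_bit(RAMROD_COMP_WAIT, &p->ramrod_flags);

	INIT_LIST_HEAD(&p->mcast_list);
	for (i = 0; i < n; i++) {
		elems[i].mac = macs[i];
		list_add_tail(&elems[i].link, &p->mcast_list);
	}
	p->mcast_list_len = n;
}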
483
484enum {
485 BNX2X_MCAST_CMD_ADD,
486 BNX2X_MCAST_CMD_CONT,
487 BNX2X_MCAST_CMD_DEL,
488 BNX2X_MCAST_CMD_RESTORE,
489};
490
491struct bnx2x_mcast_obj {
492 struct bnx2x_raw_obj raw;
493
494 union {
495 struct {
496 #define BNX2X_MCAST_BINS_NUM 256
497 #define BNX2X_MCAST_VEC_SZ (BNX2X_MCAST_BINS_NUM / 64)
498 u64 vec[BNX2X_MCAST_VEC_SZ];
499
500 /** Number of BINs to clear. Should be updated
501 * immediately when a command arrives in order to
502 * properly create DEL commands.
503 */
504 int num_bins_set;
505 } aprox_match;
506
507 struct {
508 struct list_head macs;
509 int num_macs_set;
510 } exact_match;
511 } registry;
512
513 /* Pending commands */
514 struct list_head pending_cmds_head;
515
516 /* A state that is set in raw.pstate, when there are pending commands */
517 int sched_state;
518
519 /* Maximal number of mcast MACs configured in one command */
520 int max_cmd_len;
521
522 /* Total number of currently pending MACs to configure: both
523 * in the pending commands list and in the current command.
524 */
525 int total_pending_num;
526
527 u8 engine_id;
528
529 /**
530 * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above)
531 */
532 int (*config_mcast)(struct bnx2x *bp,
533 struct bnx2x_mcast_ramrod_params *p, int cmd);
534
535 /**
536 * Fills the ramrod data during the RESTORE flow.
537 *
538 * @param bp
539 * @param o
540 * @param start_idx Registry index to start from
541 * @param rdata_idx Index in the ramrod data to start from
542 *
543 * @return -1 if we handled the whole registry or index of the last
544 * handled registry element.
545 */
546 int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
547 int start_bin, int *rdata_idx);
548
549 int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
550 struct bnx2x_mcast_ramrod_params *p, int cmd);
551
552 void (*set_one_rule)(struct bnx2x *bp,
553 struct bnx2x_mcast_obj *o, int idx,
554 union bnx2x_mcast_config_data *cfg_data, int cmd);
555
556 /** Checks if there are more mcast MACs to be set or a previous
557 * command is still pending.
558 */
559 bool (*check_pending)(struct bnx2x_mcast_obj *o);
560
561 /**
562 * Set/Clear/Check SCHEDULED state of the object
563 */
564 void (*set_sched)(struct bnx2x_mcast_obj *o);
565 void (*clear_sched)(struct bnx2x_mcast_obj *o);
566 bool (*check_sched)(struct bnx2x_mcast_obj *o);
567
568 /* Wait until all pending commands complete */
569 int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o);
570
571 /**
572 * Handle the internal object counters needed for proper
573 * commands handling. Checks that the provided parameters are
574 * feasible.
575 */
576 int (*validate)(struct bnx2x *bp,
577 struct bnx2x_mcast_ramrod_params *p, int cmd);
578
579 /**
580 * Restore the values of internal counters in case of a failure.
581 */
582 void (*revert)(struct bnx2x *bp,
583 struct bnx2x_mcast_ramrod_params *p,
584 int old_num_bins);
585
586 int (*get_registry_size)(struct bnx2x_mcast_obj *o);
587 void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n);
588};
589
590/*************************** Credit handling **********************************/
591struct bnx2x_credit_pool_obj {
592
593 /* Current amount of credit in the pool */
594 atomic_t credit;
595
596 /* Maximum allowed credit. put() will check against it. */
597 int pool_sz;
598
599 /*
600 * Allocate a pool table statically.
601 *
602	 * Currently the maximum allowed size is MAX_MAC_CREDIT_E2 (272).
603	 *
604	 * A set bit in the table means that the entry is available.
605 */
606#define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64)
607 u64 pool_mirror[BNX2X_POOL_VEC_SIZE];
608
609	 /* Base pool offset (initialized differently) */
610 int base_pool_offset;
611
612 /**
613 * Get the next free pool entry.
614 *
615 * @return true if there was a free entry in the pool
616 */
617 bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry);
618
619 /**
620 * Return the entry back to the pool.
621 *
622 * @return true if entry is legal and has been successfully
623 * returned to the pool.
624 */
625 bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry);
626
627 /**
628 * Get the requested amount of credit from the pool.
629 *
630 * @param cnt Amount of requested credit
631 * @return true if the operation is successful
632 */
633 bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt);
634
635 /**
636 * Returns the credit to the pool.
637 *
638 * @param cnt Amount of credit to return
639 * @return true if the operation is successful
640 */
641 bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt);
642
643 /**
644 * Reads the current amount of credit.
645 */
646 int (*check)(struct bnx2x_credit_pool_obj *o);
647};
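/*
 * A sketch of the intended calling convention (an illustration, not code
 * from this header): consume credit before queuing an ADD and return it
 * if the command is never sent.
 */
static inline bool example_take_one_mac_credit(
	struct bnx2x_credit_pool_obj *mp, bool cmd_queued)
{
	/* Take one credit up front; failure means the CAM is full */
	if (!mp->get(mp, 1))
		return false;

	if (!cmd_queued)
		mp->put(mp, 1);	/* roll back the unused credit */

	return cmd_queued;
}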
648
649/*************************** RSS configuration ********************************/
650enum {
651 /* RSS_MODE bits are mutually exclusive */
652 BNX2X_RSS_MODE_DISABLED,
653 BNX2X_RSS_MODE_REGULAR,
654 BNX2X_RSS_MODE_VLAN_PRI,
655 BNX2X_RSS_MODE_E1HOV_PRI,
656 BNX2X_RSS_MODE_IP_DSCP,
657
658 BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
659
660 BNX2X_RSS_IPV4,
661 BNX2X_RSS_IPV4_TCP,
662 BNX2X_RSS_IPV6,
663 BNX2X_RSS_IPV6_TCP,
664};
665
666struct bnx2x_config_rss_params {
667 struct bnx2x_rss_config_obj *rss_obj;
668
669 /* may have RAMROD_COMP_WAIT set only */
670 unsigned long ramrod_flags;
671
672 /* BNX2X_RSS_X bits */
673 unsigned long rss_flags;
674
675	 /* Number of hash bits to take into account */
676 u8 rss_result_mask;
677
678 /* Indirection table */
679 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
680
681 /* RSS hash values */
682 u32 rss_key[10];
683
684	 /* valid iff BNX2X_RSS_UPDATE_TOE is set */
685 u16 toe_rss_bitmap;
686};
687
688struct bnx2x_rss_config_obj {
689 struct bnx2x_raw_obj raw;
690
691 /* RSS engine to use */
692 u8 engine_id;
693
694 /* Last configured indirection table */
695 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
696
697 int (*config_rss)(struct bnx2x *bp,
698 struct bnx2x_config_rss_params *p);
699};
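/*
 * A configuration sketch (illustrative assumptions: the flag set and the
 * 7-bit result mask model a common TCP/IPv4+IPv6 setup, and 'num_queues'
 * is a hypothetical parameter):
 */
static inline int example_config_rss(struct bnx2x *bp,
				     struct bnx2x_rss_config_obj *rss_obj,
				     u8 num_queues)
{
	struct bnx2x_config_rss_params params = {NULL};
	int i;

	params.rss_obj = rss_obj;
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

	/* Keep 7 bits of the hash: indexes the 128-entry table below */
	params.rss_result_mask = 0x7f;

	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
		params.ind_table[i] = i % num_queues;

	return rss_obj->config_rss(bp, &params);
}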
700
701/*********************** Queue state update ***********************************/
702
703/* UPDATE command options */
704enum {
705 BNX2X_Q_UPDATE_IN_VLAN_REM,
706 BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
707 BNX2X_Q_UPDATE_OUT_VLAN_REM,
708 BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
709 BNX2X_Q_UPDATE_ANTI_SPOOF,
710 BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
711 BNX2X_Q_UPDATE_ACTIVATE,
712 BNX2X_Q_UPDATE_ACTIVATE_CHNG,
713 BNX2X_Q_UPDATE_DEF_VLAN_EN,
714 BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
715 BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
716 BNX2X_Q_UPDATE_SILENT_VLAN_REM
717};
718
719/* Allowed Queue states */
720enum bnx2x_q_state {
721 BNX2X_Q_STATE_RESET,
722 BNX2X_Q_STATE_INITIALIZED,
723 BNX2X_Q_STATE_ACTIVE,
724 BNX2X_Q_STATE_MULTI_COS,
725 BNX2X_Q_STATE_MCOS_TERMINATED,
726 BNX2X_Q_STATE_INACTIVE,
727 BNX2X_Q_STATE_STOPPED,
728 BNX2X_Q_STATE_TERMINATED,
729 BNX2X_Q_STATE_FLRED,
730 BNX2X_Q_STATE_MAX,
731};
732
733/* Allowed commands */
734enum bnx2x_queue_cmd {
735 BNX2X_Q_CMD_INIT,
736 BNX2X_Q_CMD_SETUP,
737 BNX2X_Q_CMD_SETUP_TX_ONLY,
738 BNX2X_Q_CMD_DEACTIVATE,
739 BNX2X_Q_CMD_ACTIVATE,
740 BNX2X_Q_CMD_UPDATE,
741 BNX2X_Q_CMD_UPDATE_TPA,
742 BNX2X_Q_CMD_HALT,
743 BNX2X_Q_CMD_CFC_DEL,
744 BNX2X_Q_CMD_TERMINATE,
745 BNX2X_Q_CMD_EMPTY,
746 BNX2X_Q_CMD_MAX,
747};
748
749/* queue SETUP + INIT flags */
750enum {
751 BNX2X_Q_FLG_TPA,
752 BNX2X_Q_FLG_TPA_IPV6,
753 BNX2X_Q_FLG_STATS,
754 BNX2X_Q_FLG_ZERO_STATS,
755 BNX2X_Q_FLG_ACTIVE,
756 BNX2X_Q_FLG_OV,
757 BNX2X_Q_FLG_VLAN,
758 BNX2X_Q_FLG_COS,
759 BNX2X_Q_FLG_HC,
760 BNX2X_Q_FLG_HC_EN,
761 BNX2X_Q_FLG_DHC,
762 BNX2X_Q_FLG_FCOE,
763 BNX2X_Q_FLG_LEADING_RSS,
764 BNX2X_Q_FLG_MCAST,
765 BNX2X_Q_FLG_DEF_VLAN,
766 BNX2X_Q_FLG_TX_SWITCH,
767 BNX2X_Q_FLG_TX_SEC,
768 BNX2X_Q_FLG_ANTI_SPOOF,
769 BNX2X_Q_FLG_SILENT_VLAN_REM
770};
771
772/* Queue type options: queue type may be a combination of the below. */
773enum bnx2x_q_type {
774 /** TODO: Consider moving both these flags into the init()
775 * ramrod params.
776 */
777 BNX2X_Q_TYPE_HAS_RX,
778 BNX2X_Q_TYPE_HAS_TX,
779};
780
781#define BNX2X_PRIMARY_CID_INDEX 0
782#define BNX2X_MULTI_TX_COS_E1X 1
783#define BNX2X_MULTI_TX_COS_E2_E3A0 2
784#define BNX2X_MULTI_TX_COS_E3B0 3
785#define BNX2X_MULTI_TX_COS BNX2X_MULTI_TX_COS_E3B0
786
787
788struct bnx2x_queue_init_params {
789 struct {
790 unsigned long flags;
791 u16 hc_rate;
792 u8 fw_sb_id;
793 u8 sb_cq_index;
794 } tx;
795
796 struct {
797 unsigned long flags;
798 u16 hc_rate;
799 u8 fw_sb_id;
800 u8 sb_cq_index;
801 } rx;
802
803 /* CID context in the host memory */
804 struct eth_context *cxts[BNX2X_MULTI_TX_COS];
805
806 /* maximum number of cos supported by hardware */
807 u8 max_cos;
808};
809
810struct bnx2x_queue_terminate_params {
811 /* index within the tx_only cids of this queue object */
812 u8 cid_index;
813};
814
815struct bnx2x_queue_cfc_del_params {
816 /* index within the tx_only cids of this queue object */
817 u8 cid_index;
818};
819
820struct bnx2x_queue_update_params {
821 unsigned long update_flags; /* BNX2X_Q_UPDATE_XX bits */
822 u16 def_vlan;
823 u16 silent_removal_value;
824 u16 silent_removal_mask;
825/* index within the tx_only cids of this queue object */
826 u8 cid_index;
827};
828
829struct rxq_pause_params {
830 u16 bd_th_lo;
831 u16 bd_th_hi;
832 u16 rcq_th_lo;
833 u16 rcq_th_hi;
834 u16 sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */
835 u16 sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */
836 u16 pri_map;
837};
838
839/* general */
840struct bnx2x_general_setup_params {
841 /* valid iff BNX2X_Q_FLG_STATS */
842 u8 stat_id;
843
844 u8 spcl_id;
845 u16 mtu;
846 u8 cos;
847};
848
849struct bnx2x_rxq_setup_params {
850 /* dma */
851 dma_addr_t dscr_map;
852 dma_addr_t sge_map;
853 dma_addr_t rcq_map;
854 dma_addr_t rcq_np_map;
855
856 u16 drop_flags;
857 u16 buf_sz;
858 u8 fw_sb_id;
859 u8 cl_qzone_id;
860
861 /* valid iff BNX2X_Q_FLG_TPA */
862 u16 tpa_agg_sz;
863 u16 sge_buf_sz;
864 u8 max_sges_pkt;
865 u8 max_tpa_queues;
866 u8 rss_engine_id;
867
868 u8 cache_line_log;
869
870 u8 sb_cq_index;
871
872	 /* valid iff BNX2X_Q_FLG_SILENT_VLAN_REM */
873 u16 silent_removal_value;
874 u16 silent_removal_mask;
875};
876
877struct bnx2x_txq_setup_params {
878 /* dma */
879 dma_addr_t dscr_map;
880
881 u8 fw_sb_id;
882 u8 sb_cq_index;
883 u8 cos; /* valid iff BNX2X_Q_FLG_COS */
884 u16 traffic_type;
885 /* equals to the leading rss client id, used for TX classification*/
886 u8 tss_leading_cl_id;
887
888 /* valid iff BNX2X_Q_FLG_DEF_VLAN */
889 u16 default_vlan;
890};
891
892struct bnx2x_queue_setup_params {
893 struct bnx2x_general_setup_params gen_params;
894 struct bnx2x_txq_setup_params txq_params;
895 struct bnx2x_rxq_setup_params rxq_params;
896 struct rxq_pause_params pause_params;
897 unsigned long flags;
898};
899
900struct bnx2x_queue_setup_tx_only_params {
901 struct bnx2x_general_setup_params gen_params;
902 struct bnx2x_txq_setup_params txq_params;
903 unsigned long flags;
904 /* index within the tx_only cids of this queue object */
905 u8 cid_index;
906};
907
908struct bnx2x_queue_state_params {
909 struct bnx2x_queue_sp_obj *q_obj;
910
911 /* Current command */
912 enum bnx2x_queue_cmd cmd;
913
914 /* may have RAMROD_COMP_WAIT set only */
915 unsigned long ramrod_flags;
916
917 /* Params according to the current command */
918 union {
919 struct bnx2x_queue_update_params update;
920 struct bnx2x_queue_setup_params setup;
921 struct bnx2x_queue_init_params init;
922 struct bnx2x_queue_setup_tx_only_params tx_only;
923 struct bnx2x_queue_terminate_params terminate;
924 struct bnx2x_queue_cfc_del_params cfc_del;
925 } params;
926};
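/*
 * Illustrative sketch (not part of the original source): (re)activating
 * a queue with the UPDATE command. "q" is assumed to be an already
 * initialized bnx2x_queue_sp_obj.
 *
 *	struct bnx2x_queue_state_params qsp = {0};
 *
 *	qsp.q_obj = q;
 *	qsp.cmd = BNX2X_Q_CMD_UPDATE;
 *	__set_bit(RAMROD_COMP_WAIT, &qsp.ramrod_flags);
 *	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &qsp.params.update.update_flags);
 *	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
 *		  &qsp.params.update.update_flags);
 *	rc = bnx2x_queue_state_change(bp, &qsp);
 */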
927
928struct bnx2x_queue_sp_obj {
929 u32 cids[BNX2X_MULTI_TX_COS];
930 u8 cl_id;
931 u8 func_id;
932
933 /*
934	 * Number of traffic classes supported by the queue.
935	 * The primary connection of the queue supports the first traffic
936	 * class. Any further traffic class is supported by a tx-only
937	 * connection.
938	 *
939	 * Therefore max_cos is also the number of valid entries in the
940	 * cids array.
941 */
942 u8 max_cos;
943 u8 num_tx_only, next_tx_only;
944
945 enum bnx2x_q_state state, next_state;
946
947 /* bits from enum bnx2x_q_type */
948 unsigned long type;
949
950 /* BNX2X_Q_CMD_XX bits. This object implements "one
951 * pending" paradigm but for debug and tracing purposes it's
952	 * more convenient to have different bits for different
953 * commands.
954 */
955 unsigned long pending;
956
957	 /* Buffer to use as ramrod data, and its mapping */
958 void *rdata;
959 dma_addr_t rdata_mapping;
960
961 /**
962 * Performs one state change according to the given parameters.
963 *
964 * @return 0 in case of success and negative value otherwise.
965 */
966 int (*send_cmd)(struct bnx2x *bp,
967 struct bnx2x_queue_state_params *params);
968
969 /**
970 * Sets the pending bit according to the requested transition.
971 */
972 int (*set_pending)(struct bnx2x_queue_sp_obj *o,
973 struct bnx2x_queue_state_params *params);
974
975 /**
976 * Checks that the requested state transition is legal.
977 */
978 int (*check_transition)(struct bnx2x *bp,
979 struct bnx2x_queue_sp_obj *o,
980 struct bnx2x_queue_state_params *params);
981
982 /**
983 * Completes the pending command.
984 */
985 int (*complete_cmd)(struct bnx2x *bp,
986 struct bnx2x_queue_sp_obj *o,
987 enum bnx2x_queue_cmd);
988
989 int (*wait_comp)(struct bnx2x *bp,
990 struct bnx2x_queue_sp_obj *o,
991 enum bnx2x_queue_cmd cmd);
992};
993
994/********************** Function state update *********************************/
995/* Allowed Function states */
996enum bnx2x_func_state {
997 BNX2X_F_STATE_RESET,
998 BNX2X_F_STATE_INITIALIZED,
999 BNX2X_F_STATE_STARTED,
1000 BNX2X_F_STATE_TX_STOPPED,
1001 BNX2X_F_STATE_MAX,
1002};
1003
1004/* Allowed Function commands */
1005enum bnx2x_func_cmd {
1006 BNX2X_F_CMD_HW_INIT,
1007 BNX2X_F_CMD_START,
1008 BNX2X_F_CMD_STOP,
1009 BNX2X_F_CMD_HW_RESET,
1010 BNX2X_F_CMD_TX_STOP,
1011 BNX2X_F_CMD_TX_START,
1012 BNX2X_F_CMD_MAX,
1013};
1014
1015struct bnx2x_func_hw_init_params {
1016 /* A load phase returned by MCP.
1017 *
1018 * May be:
1019 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
1020 * FW_MSG_CODE_DRV_LOAD_COMMON
1021 * FW_MSG_CODE_DRV_LOAD_PORT
1022 * FW_MSG_CODE_DRV_LOAD_FUNCTION
1023 */
1024 u32 load_phase;
1025};
1026
1027struct bnx2x_func_hw_reset_params {
1028 /* A load phase returned by MCP.
1029 *
1030 * May be:
1031 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
1032 * FW_MSG_CODE_DRV_LOAD_COMMON
1033 * FW_MSG_CODE_DRV_LOAD_PORT
1034 * FW_MSG_CODE_DRV_LOAD_FUNCTION
1035 */
1036 u32 reset_phase;
1037};
1038
1039struct bnx2x_func_start_params {
1040 /* Multi Function mode:
1041 * - Single Function
1042 * - Switch Dependent
1043 * - Switch Independent
1044 */
1045 u16 mf_mode;
1046
1047 /* Switch Dependent mode outer VLAN tag */
1048 u16 sd_vlan_tag;
1049
1050 /* Function cos mode */
1051 u8 network_cos_mode;
1052};
1053
1054struct bnx2x_func_tx_start_params {
1055 struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
1056 u8 dcb_enabled;
1057 u8 dcb_version;
1058 u8 dont_add_pri_0_en;
1059};
1060
1061struct bnx2x_func_state_params {
1062 struct bnx2x_func_sp_obj *f_obj;
1063
1064 /* Current command */
1065 enum bnx2x_func_cmd cmd;
1066
1067 /* may have RAMROD_COMP_WAIT set only */
1068 unsigned long ramrod_flags;
1069
1070 /* Params according to the current command */
1071 union {
1072 struct bnx2x_func_hw_init_params hw_init;
1073 struct bnx2x_func_hw_reset_params hw_reset;
1074 struct bnx2x_func_start_params start;
1075 struct bnx2x_func_tx_start_params tx_start;
1076 } params;
1077};
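/*
 * Illustrative sketch (not part of the original source): issuing a
 * function START command. The object location bp->func_obj and the
 * mf_mode value are assumptions for the example.
 *
 *	struct bnx2x_func_state_params fsp = {0};
 *
 *	fsp.f_obj = &bp->func_obj;
 *	fsp.cmd = BNX2X_F_CMD_START;
 *	__set_bit(RAMROD_COMP_WAIT, &fsp.ramrod_flags);
 *	fsp.params.start.mf_mode = 0;		(single function)
 *	fsp.params.start.sd_vlan_tag = 0;
 *	fsp.params.start.network_cos_mode = 0;
 *	rc = bnx2x_func_state_change(bp, &fsp);
 */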
1078
1079struct bnx2x_func_sp_drv_ops {
1080 /* Init tool + runtime initialization:
1081 * - Common Chip
1082 * - Common (per Path)
1083 * - Port
1084 * - Function phases
1085 */
1086 int (*init_hw_cmn_chip)(struct bnx2x *bp);
1087 int (*init_hw_cmn)(struct bnx2x *bp);
1088 int (*init_hw_port)(struct bnx2x *bp);
1089 int (*init_hw_func)(struct bnx2x *bp);
1090
1091 /* Reset Function HW: Common, Port, Function phases. */
1092 void (*reset_hw_cmn)(struct bnx2x *bp);
1093 void (*reset_hw_port)(struct bnx2x *bp);
1094 void (*reset_hw_func)(struct bnx2x *bp);
1095
1096 /* Init/Free GUNZIP resources */
1097 int (*gunzip_init)(struct bnx2x *bp);
1098 void (*gunzip_end)(struct bnx2x *bp);
1099
1100 /* Prepare/Release FW resources */
1101 int (*init_fw)(struct bnx2x *bp);
1102 void (*release_fw)(struct bnx2x *bp);
1103};
1104
1105struct bnx2x_func_sp_obj {
1106 enum bnx2x_func_state state, next_state;
1107
1108 /* BNX2X_FUNC_CMD_XX bits. This object implements "one
1109 * pending" paradigm but for debug and tracing purposes it's
1110	 * more convenient to have different bits for different
1111 * commands.
1112 */
1113 unsigned long pending;
1114
1115	 /* Buffer to use as ramrod data, and its mapping */
1116 void *rdata;
1117 dma_addr_t rdata_mapping;
1118
1119	 /* This mutex validates that when the pending flag is taken, the
1120	 * next ramrod to be sent will be the one that set the pending bit
1121 */
1122 struct mutex one_pending_mutex;
1123
1124 /* Driver interface */
1125 struct bnx2x_func_sp_drv_ops *drv;
1126
1127 /**
1128 * Performs one state change according to the given parameters.
1129 *
1130 * @return 0 in case of success and negative value otherwise.
1131 */
1132 int (*send_cmd)(struct bnx2x *bp,
1133 struct bnx2x_func_state_params *params);
1134
1135 /**
1136 * Checks that the requested state transition is legal.
1137 */
1138 int (*check_transition)(struct bnx2x *bp,
1139 struct bnx2x_func_sp_obj *o,
1140 struct bnx2x_func_state_params *params);
1141
1142 /**
1143 * Completes the pending command.
1144 */
1145 int (*complete_cmd)(struct bnx2x *bp,
1146 struct bnx2x_func_sp_obj *o,
1147 enum bnx2x_func_cmd cmd);
1148
1149 int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o,
1150 enum bnx2x_func_cmd cmd);
1151};
1152
1153/********************** Interfaces ********************************************/
1154/* Queueable objects set */
1155union bnx2x_qable_obj {
1156 struct bnx2x_vlan_mac_obj vlan_mac;
1157};
1158/************** Function state update *********/
1159void bnx2x_init_func_obj(struct bnx2x *bp,
1160 struct bnx2x_func_sp_obj *obj,
1161 void *rdata, dma_addr_t rdata_mapping,
1162 struct bnx2x_func_sp_drv_ops *drv_iface);
1163
1164int bnx2x_func_state_change(struct bnx2x *bp,
1165 struct bnx2x_func_state_params *params);
1166
1167enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
1168 struct bnx2x_func_sp_obj *o);
1169/******************* Queue State **************/
1170void bnx2x_init_queue_obj(struct bnx2x *bp,
1171 struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 *cids,
1172 u8 cid_cnt, u8 func_id, void *rdata,
1173 dma_addr_t rdata_mapping, unsigned long type);
1174
1175int bnx2x_queue_state_change(struct bnx2x *bp,
1176 struct bnx2x_queue_state_params *params);
1177
1178/********************* VLAN-MAC ****************/
1179void bnx2x_init_mac_obj(struct bnx2x *bp,
1180 struct bnx2x_vlan_mac_obj *mac_obj,
1181 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1182 dma_addr_t rdata_mapping, int state,
1183 unsigned long *pstate, bnx2x_obj_type type,
1184 struct bnx2x_credit_pool_obj *macs_pool);
1185
1186void bnx2x_init_vlan_obj(struct bnx2x *bp,
1187 struct bnx2x_vlan_mac_obj *vlan_obj,
1188 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1189 dma_addr_t rdata_mapping, int state,
1190 unsigned long *pstate, bnx2x_obj_type type,
1191 struct bnx2x_credit_pool_obj *vlans_pool);
1192
1193void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1194 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
1195 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1196 dma_addr_t rdata_mapping, int state,
1197 unsigned long *pstate, bnx2x_obj_type type,
1198 struct bnx2x_credit_pool_obj *macs_pool,
1199 struct bnx2x_credit_pool_obj *vlans_pool);
1200
1201int bnx2x_config_vlan_mac(struct bnx2x *bp,
1202 struct bnx2x_vlan_mac_ramrod_params *p);
1203
1204int bnx2x_vlan_mac_move(struct bnx2x *bp,
1205 struct bnx2x_vlan_mac_ramrod_params *p,
1206 struct bnx2x_vlan_mac_obj *dest_o);
1207
1208/********************* RX MODE ****************/
1209
1210void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
1211 struct bnx2x_rx_mode_obj *o);
1212
1213/**
1214 * Send an RX_MODE ramrod according to the provided parameters.
1215 *
1216 * @param bp
1217 * @param p Command parameters
1218 *
1219 * @return 0 - if the operation was successful and there are no pending completions,
1220 * positive number - if there are pending completions,
1221 * negative - if there were errors
1222 */
1223int bnx2x_config_rx_mode(struct bnx2x *bp,
1224 struct bnx2x_rx_mode_ramrod_params *p);
1225
1226/****************** MULTICASTS ****************/
1227
1228void bnx2x_init_mcast_obj(struct bnx2x *bp,
1229 struct bnx2x_mcast_obj *mcast_obj,
1230 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
1231 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
1232 int state, unsigned long *pstate,
1233 bnx2x_obj_type type);
1234
1235/**
1236 * Configure the multicast MACs list. May configure a new list
1237 * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
1238 * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
1239 * configuration, continue to execute the pending commands
1240 * (BNX2X_MCAST_CMD_CONT).
1241 *
1242 * If a previous command is still pending or if the number of MACs to
1243 * configure is more than the maximum number of MACs in one command,
1244 * the current command will be enqueued to the tail of the
1245 * pending commands list.
1246 *
1247 * @param bp
1248 * @param p
1249 * @param command to execute: BNX2X_MCAST_CMD_X
1250 *
1251 * @return 0 if the operation was successful and there are no pending completions,
1252 * negative if there were errors, positive if there are pending
1253 * completions.
1254 */
1255int bnx2x_config_mcast(struct bnx2x *bp,
1256 struct bnx2x_mcast_ramrod_params *p, int cmd);
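/*
 * Illustrative sketch (not part of the original source): adding a single
 * multicast MAC. The ramrod-params field names (mcast_obj, mcast_list,
 * mcast_list_len) and bnx2x_mcast_list_elem are assumed from context.
 *
 *	struct bnx2x_mcast_ramrod_params rparam = {0};
 *	struct bnx2x_mcast_list_elem elem;
 *
 *	rparam.mcast_obj = &bp->mcast_obj;
 *	INIT_LIST_HEAD(&rparam.mcast_list);
 *	elem.mac = mc_mac;			(MAC to add)
 *	list_add_tail(&elem.link, &rparam.mcast_list);
 *	rparam.mcast_list_len = 1;
 *	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
 */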
1257
1258/****************** CREDIT POOL ****************/
1259void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
1260 struct bnx2x_credit_pool_obj *p, u8 func_id,
1261 u8 func_num);
1262void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
1263 struct bnx2x_credit_pool_obj *p, u8 func_id,
1264 u8 func_num);
1265
1266
1267/****************** RSS CONFIGURATION ****************/
1268void bnx2x_init_rss_config_obj(struct bnx2x *bp,
1269 struct bnx2x_rss_config_obj *rss_obj,
1270 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
1271 void *rdata, dma_addr_t rdata_mapping,
1272 int state, unsigned long *pstate,
1273 bnx2x_obj_type type);
1274
1275/**
1276 * Updates RSS configuration according to provided parameters.
1277 *
1278 * @param bp
1279 * @param p
1280 *
1281 * @return 0 in case of success
1282 */
1283int bnx2x_config_rss(struct bnx2x *bp,
1284 struct bnx2x_config_rss_params *p);
1285
1286/**
1287 * Return the current ind_table configuration.
1288 *
1289 * @param bp
1290 * @param ind_table buffer to fill with the current indirection
1291 * table content. Should be at least
1292 * T_ETH_INDIRECTION_TABLE_SIZE bytes long.
1293 */
1294void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
1295 u8 *ind_table);
1296
1297#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
new file mode 100644
index 00000000000..9908f2bbcf7
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -0,0 +1,1599 @@
1/* bnx2x_stats.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17#include "bnx2x_stats.h"
18#include "bnx2x_cmn.h"
19
20
21/* Statistics */
22
23/*
24 * General service functions
25 */
26
27static inline long bnx2x_hilo(u32 *hiref)
28{
29 u32 lo = *(hiref + 1);
30#if (BITS_PER_LONG == 64)
31 u32 hi = *hiref;
32
33 return HILO_U64(hi, lo);
34#else
35 return lo;
36#endif
37}
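/*
 * Illustrative note (not in the original source): firmware exports each
 * 64-bit counter as two consecutive u32s with the high word first, so
 * on a 64-bit host HILO_U64(hi, lo) is expected to reduce to roughly
 * (((u64)hi << 32) + lo). On 32-bit hosts only the low word is
 * returned, which wraps every 2^32 units.
 */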
38
39/*
40 * Init service functions
41 */
42
43/* Post the next statistics ramrod. Protect it with the spin in
44 * order to ensure the strict order between statistics ramrods
45 * (each ramrod has a sequence number passed in a
46 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
47 * sent in order).
48 */
49static void bnx2x_storm_stats_post(struct bnx2x *bp)
50{
51 if (!bp->stats_pending) {
52 int rc;
53
54 spin_lock_bh(&bp->stats_lock);
55
56 if (bp->stats_pending) {
57 spin_unlock_bh(&bp->stats_lock);
58 return;
59 }
60
61 bp->fw_stats_req->hdr.drv_stats_counter =
62 cpu_to_le16(bp->stats_counter++);
63
64 DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n",
65 bp->fw_stats_req->hdr.drv_stats_counter);
66
67
68
69 /* send FW stats ramrod */
70 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
71 U64_HI(bp->fw_stats_req_mapping),
72 U64_LO(bp->fw_stats_req_mapping),
73 NONE_CONNECTION_TYPE);
74 if (rc == 0)
75 bp->stats_pending = 1;
76
77 spin_unlock_bh(&bp->stats_lock);
78 }
79}
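/*
 * Illustrative note (not in the original source): the function above is
 * a classic double-checked test. In outline:
 *
 *	if (!pending) {
 *		lock(stats_lock);
 *		if (!pending && post_ramrod() == 0)
 *			pending = 1;
 *		unlock(stats_lock);
 *	}
 *
 * The unlocked first read only skips the lock on the common path; the
 * real decision is made under bp->stats_lock.
 */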
80
81static void bnx2x_hw_stats_post(struct bnx2x *bp)
82{
83 struct dmae_command *dmae = &bp->stats_dmae;
84 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
85
86 *stats_comp = DMAE_COMP_VAL;
87 if (CHIP_REV_IS_SLOW(bp))
88 return;
89
90 /* loader */
91 if (bp->executer_idx) {
92 int loader_idx = PMF_DMAE_C(bp);
93 u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
94 true, DMAE_COMP_GRC);
95 opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
96
97 memset(dmae, 0, sizeof(struct dmae_command));
98 dmae->opcode = opcode;
99 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
100 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
101 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
102 sizeof(struct dmae_command) *
103 (loader_idx + 1)) >> 2;
104 dmae->dst_addr_hi = 0;
105 dmae->len = sizeof(struct dmae_command) >> 2;
106 if (CHIP_IS_E1(bp))
107 dmae->len--;
108 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
109 dmae->comp_addr_hi = 0;
110 dmae->comp_val = 1;
111
112 *stats_comp = 0;
113 bnx2x_post_dmae(bp, dmae, loader_idx);
114
115 } else if (bp->func_stx) {
116 *stats_comp = 0;
117 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
118 }
119}
120
121static int bnx2x_stats_comp(struct bnx2x *bp)
122{
123 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
124 int cnt = 10;
125
126 might_sleep();
127 while (*stats_comp != DMAE_COMP_VAL) {
128 if (!cnt) {
129 BNX2X_ERR("timeout waiting for stats finished\n");
130 break;
131 }
132 cnt--;
133		usleep_range(1000, 2000);
134 }
135 return 1;
136}
137
138/*
139 * Statistics service functions
140 */
141
142static void bnx2x_stats_pmf_update(struct bnx2x *bp)
143{
144 struct dmae_command *dmae;
145 u32 opcode;
146 int loader_idx = PMF_DMAE_C(bp);
147 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
148
149 /* sanity */
150 if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) {
151 BNX2X_ERR("BUG!\n");
152 return;
153 }
154
155 bp->executer_idx = 0;
156
157 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
158
159 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
160 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
161 dmae->src_addr_lo = bp->port.port_stx >> 2;
162 dmae->src_addr_hi = 0;
163 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
164 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
165 dmae->len = DMAE_LEN32_RD_MAX;
166 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
167 dmae->comp_addr_hi = 0;
168 dmae->comp_val = 1;
169
170 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
171 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
172 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
173 dmae->src_addr_hi = 0;
174 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
175 DMAE_LEN32_RD_MAX * 4);
176 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
177 DMAE_LEN32_RD_MAX * 4);
178 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
179 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
180 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
181 dmae->comp_val = DMAE_COMP_VAL;
182
183 *stats_comp = 0;
184 bnx2x_hw_stats_post(bp);
185 bnx2x_stats_comp(bp);
186}
187
188static void bnx2x_port_stats_init(struct bnx2x *bp)
189{
190 struct dmae_command *dmae;
191 int port = BP_PORT(bp);
192 u32 opcode;
193 int loader_idx = PMF_DMAE_C(bp);
194 u32 mac_addr;
195 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
196
197 /* sanity */
198 if (!bp->link_vars.link_up || !bp->port.pmf) {
199 BNX2X_ERR("BUG!\n");
200 return;
201 }
202
203 bp->executer_idx = 0;
204
205 /* MCP */
206 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
207 true, DMAE_COMP_GRC);
208
209 if (bp->port.port_stx) {
210
211 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
212 dmae->opcode = opcode;
213 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
214 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
215 dmae->dst_addr_lo = bp->port.port_stx >> 2;
216 dmae->dst_addr_hi = 0;
217 dmae->len = sizeof(struct host_port_stats) >> 2;
218 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
219 dmae->comp_addr_hi = 0;
220 dmae->comp_val = 1;
221 }
222
223 if (bp->func_stx) {
224
225 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
226 dmae->opcode = opcode;
227 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
228 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
229 dmae->dst_addr_lo = bp->func_stx >> 2;
230 dmae->dst_addr_hi = 0;
231 dmae->len = sizeof(struct host_func_stats) >> 2;
232 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
233 dmae->comp_addr_hi = 0;
234 dmae->comp_val = 1;
235 }
236
237 /* MAC */
238 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
239 true, DMAE_COMP_GRC);
240
241 /* EMAC is special */
242 if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
243 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
244
245 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
246 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
247 dmae->opcode = opcode;
248 dmae->src_addr_lo = (mac_addr +
249 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
250 dmae->src_addr_hi = 0;
251 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
252 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
253 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
254 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
255 dmae->comp_addr_hi = 0;
256 dmae->comp_val = 1;
257
258 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
259 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
260 dmae->opcode = opcode;
261 dmae->src_addr_lo = (mac_addr +
262 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
263 dmae->src_addr_hi = 0;
264 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
265 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
266 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
267 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
268 dmae->len = 1;
269 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
270 dmae->comp_addr_hi = 0;
271 dmae->comp_val = 1;
272
273 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
274 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
275 dmae->opcode = opcode;
276 dmae->src_addr_lo = (mac_addr +
277 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
278 dmae->src_addr_hi = 0;
279 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
280 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
281 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
282 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
283 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
284 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
285 dmae->comp_addr_hi = 0;
286 dmae->comp_val = 1;
287 } else {
288 u32 tx_src_addr_lo, rx_src_addr_lo;
289 u16 rx_len, tx_len;
290
291 /* configure the params according to MAC type */
292 switch (bp->link_vars.mac_type) {
293 case MAC_TYPE_BMAC:
294 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
295 NIG_REG_INGRESS_BMAC0_MEM);
296
297 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
298 BIGMAC_REGISTER_TX_STAT_GTBYT */
299 if (CHIP_IS_E1x(bp)) {
300 tx_src_addr_lo = (mac_addr +
301 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
302 tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
303 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
304 rx_src_addr_lo = (mac_addr +
305 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
306 rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
307 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
308 } else {
309 tx_src_addr_lo = (mac_addr +
310 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
311 tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
312 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
313 rx_src_addr_lo = (mac_addr +
314 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
315 rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
316 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
317 }
318 break;
319
320 case MAC_TYPE_UMAC: /* handled by MSTAT */
321 case MAC_TYPE_XMAC: /* handled by MSTAT */
322 default:
323 mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
324 tx_src_addr_lo = (mac_addr +
325 MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
326 rx_src_addr_lo = (mac_addr +
327 MSTAT_REG_RX_STAT_GR64_LO) >> 2;
328 tx_len = sizeof(bp->slowpath->
329 mac_stats.mstat_stats.stats_tx) >> 2;
330 rx_len = sizeof(bp->slowpath->
331 mac_stats.mstat_stats.stats_rx) >> 2;
332 break;
333 }
334
335 /* TX stats */
336 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
337 dmae->opcode = opcode;
338 dmae->src_addr_lo = tx_src_addr_lo;
339 dmae->src_addr_hi = 0;
340 dmae->len = tx_len;
341 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
342 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
343 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
344 dmae->comp_addr_hi = 0;
345 dmae->comp_val = 1;
346
347 /* RX stats */
348 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
349 dmae->opcode = opcode;
350 dmae->src_addr_hi = 0;
351 dmae->src_addr_lo = rx_src_addr_lo;
352 dmae->dst_addr_lo =
353 U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
354 dmae->dst_addr_hi =
355 U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
356 dmae->len = rx_len;
357 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
358 dmae->comp_addr_hi = 0;
359 dmae->comp_val = 1;
360 }
361
362 /* NIG */
363 if (!CHIP_IS_E3(bp)) {
364 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
365 dmae->opcode = opcode;
366 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
367 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
368 dmae->src_addr_hi = 0;
369 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
370 offsetof(struct nig_stats, egress_mac_pkt0_lo));
371 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
372 offsetof(struct nig_stats, egress_mac_pkt0_lo));
373 dmae->len = (2*sizeof(u32)) >> 2;
374 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
375 dmae->comp_addr_hi = 0;
376 dmae->comp_val = 1;
377
378 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
379 dmae->opcode = opcode;
380 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
381 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
382 dmae->src_addr_hi = 0;
383 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
384 offsetof(struct nig_stats, egress_mac_pkt1_lo));
385 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
386 offsetof(struct nig_stats, egress_mac_pkt1_lo));
387 dmae->len = (2*sizeof(u32)) >> 2;
388 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
389 dmae->comp_addr_hi = 0;
390 dmae->comp_val = 1;
391 }
392
393 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
394 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
395 true, DMAE_COMP_PCI);
396 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
397 NIG_REG_STAT0_BRB_DISCARD) >> 2;
398 dmae->src_addr_hi = 0;
399 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
400 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
401 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
402
403 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
404 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
405 dmae->comp_val = DMAE_COMP_VAL;
406
407 *stats_comp = 0;
408}
409
410static void bnx2x_func_stats_init(struct bnx2x *bp)
411{
412 struct dmae_command *dmae = &bp->stats_dmae;
413 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
414
415 /* sanity */
416 if (!bp->func_stx) {
417 BNX2X_ERR("BUG!\n");
418 return;
419 }
420
421 bp->executer_idx = 0;
422 memset(dmae, 0, sizeof(struct dmae_command));
423
424 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
425 true, DMAE_COMP_PCI);
426 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
427 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
428 dmae->dst_addr_lo = bp->func_stx >> 2;
429 dmae->dst_addr_hi = 0;
430 dmae->len = sizeof(struct host_func_stats) >> 2;
431 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
432 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
433 dmae->comp_val = DMAE_COMP_VAL;
434
435 *stats_comp = 0;
436}
437
438static void bnx2x_stats_start(struct bnx2x *bp)
439{
440 if (bp->port.pmf)
441 bnx2x_port_stats_init(bp);
442
443 else if (bp->func_stx)
444 bnx2x_func_stats_init(bp);
445
446 bnx2x_hw_stats_post(bp);
447 bnx2x_storm_stats_post(bp);
448}
449
450static void bnx2x_stats_pmf_start(struct bnx2x *bp)
451{
452 bnx2x_stats_comp(bp);
453 bnx2x_stats_pmf_update(bp);
454 bnx2x_stats_start(bp);
455}
456
457static void bnx2x_stats_restart(struct bnx2x *bp)
458{
459 bnx2x_stats_comp(bp);
460 bnx2x_stats_start(bp);
461}
462
463static void bnx2x_bmac_stats_update(struct bnx2x *bp)
464{
465 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
466 struct bnx2x_eth_stats *estats = &bp->eth_stats;
467 struct {
468 u32 lo;
469 u32 hi;
470 } diff;
471
472 if (CHIP_IS_E1x(bp)) {
473 struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
474
475 /* the macros below will use "bmac1_stats" type */
476 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
477 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
478 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
479 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
480 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
481 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
482 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
483 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
484 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
485
486 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
487 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
488 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
489 UPDATE_STAT64(tx_stat_gt127,
490 tx_stat_etherstatspkts65octetsto127octets);
491 UPDATE_STAT64(tx_stat_gt255,
492 tx_stat_etherstatspkts128octetsto255octets);
493 UPDATE_STAT64(tx_stat_gt511,
494 tx_stat_etherstatspkts256octetsto511octets);
495 UPDATE_STAT64(tx_stat_gt1023,
496 tx_stat_etherstatspkts512octetsto1023octets);
497 UPDATE_STAT64(tx_stat_gt1518,
498 tx_stat_etherstatspkts1024octetsto1522octets);
499 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
500 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
501 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
502 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
503 UPDATE_STAT64(tx_stat_gterr,
504 tx_stat_dot3statsinternalmactransmiterrors);
505 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
506
507 } else {
508 struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
509
510 /* the macros below will use "bmac2_stats" type */
511 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
512 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
513 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
514 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
515 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
516 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
517 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
518 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
519 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
520 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
521 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
522 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
523 UPDATE_STAT64(tx_stat_gt127,
524 tx_stat_etherstatspkts65octetsto127octets);
525 UPDATE_STAT64(tx_stat_gt255,
526 tx_stat_etherstatspkts128octetsto255octets);
527 UPDATE_STAT64(tx_stat_gt511,
528 tx_stat_etherstatspkts256octetsto511octets);
529 UPDATE_STAT64(tx_stat_gt1023,
530 tx_stat_etherstatspkts512octetsto1023octets);
531 UPDATE_STAT64(tx_stat_gt1518,
532 tx_stat_etherstatspkts1024octetsto1522octets);
533 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
534 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
535 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
536 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
537 UPDATE_STAT64(tx_stat_gterr,
538 tx_stat_dot3statsinternalmactransmiterrors);
539 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
540 }
541
542 estats->pause_frames_received_hi =
543 pstats->mac_stx[1].rx_stat_mac_xpf_hi;
544 estats->pause_frames_received_lo =
545 pstats->mac_stx[1].rx_stat_mac_xpf_lo;
546
547 estats->pause_frames_sent_hi =
548 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
549 estats->pause_frames_sent_lo =
550 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
551}
552
553static void bnx2x_mstat_stats_update(struct bnx2x *bp)
554{
555 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
556 struct bnx2x_eth_stats *estats = &bp->eth_stats;
557
558 struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);
559
560 ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
561 ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
562 ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
563 ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
564 ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
565 ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
566 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
567 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
568 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
569 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
570
571
572 ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
573 ADD_STAT64(stats_tx.tx_gt127,
574 tx_stat_etherstatspkts65octetsto127octets);
575 ADD_STAT64(stats_tx.tx_gt255,
576 tx_stat_etherstatspkts128octetsto255octets);
577 ADD_STAT64(stats_tx.tx_gt511,
578 tx_stat_etherstatspkts256octetsto511octets);
579 ADD_STAT64(stats_tx.tx_gt1023,
580 tx_stat_etherstatspkts512octetsto1023octets);
581 ADD_STAT64(stats_tx.tx_gt1518,
582 tx_stat_etherstatspkts1024octetsto1522octets);
583 ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
584
585 ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
586 ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
587 ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
588
589 ADD_STAT64(stats_tx.tx_gterr,
590 tx_stat_dot3statsinternalmactransmiterrors);
591 ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
592
593 ADD_64(estats->etherstatspkts1024octetsto1522octets_hi,
594 new->stats_tx.tx_gt1518_hi,
595 estats->etherstatspkts1024octetsto1522octets_lo,
596 new->stats_tx.tx_gt1518_lo);
597
598 ADD_64(estats->etherstatspktsover1522octets_hi,
599 new->stats_tx.tx_gt2047_hi,
600 estats->etherstatspktsover1522octets_lo,
601 new->stats_tx.tx_gt2047_lo);
602
603 ADD_64(estats->etherstatspktsover1522octets_hi,
604 new->stats_tx.tx_gt4095_hi,
605 estats->etherstatspktsover1522octets_lo,
606 new->stats_tx.tx_gt4095_lo);
607
608 ADD_64(estats->etherstatspktsover1522octets_hi,
609 new->stats_tx.tx_gt9216_hi,
610 estats->etherstatspktsover1522octets_lo,
611 new->stats_tx.tx_gt9216_lo);
612
613
614 ADD_64(estats->etherstatspktsover1522octets_hi,
615 new->stats_tx.tx_gt16383_hi,
616 estats->etherstatspktsover1522octets_lo,
617 new->stats_tx.tx_gt16383_lo);
618
619 estats->pause_frames_received_hi =
620 pstats->mac_stx[1].rx_stat_mac_xpf_hi;
621 estats->pause_frames_received_lo =
622 pstats->mac_stx[1].rx_stat_mac_xpf_lo;
623
624 estats->pause_frames_sent_hi =
625 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
626 estats->pause_frames_sent_lo =
627 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
628}
629
630static void bnx2x_emac_stats_update(struct bnx2x *bp)
631{
632 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
633 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
634 struct bnx2x_eth_stats *estats = &bp->eth_stats;
635
636 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
637 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
638 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
639 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
640 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
641 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
642 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
643 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
644 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
645 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
646 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
647 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
648 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
649 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
650 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
651 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
652 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
653 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
654 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
655 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
656 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
657 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
658 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
659 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
660 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
661 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
662 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
663 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
664 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
665 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
666 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
667
668 estats->pause_frames_received_hi =
669 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
670 estats->pause_frames_received_lo =
671 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
672 ADD_64(estats->pause_frames_received_hi,
673 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
674 estats->pause_frames_received_lo,
675 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
676
677 estats->pause_frames_sent_hi =
678 pstats->mac_stx[1].tx_stat_outxonsent_hi;
679 estats->pause_frames_sent_lo =
680 pstats->mac_stx[1].tx_stat_outxonsent_lo;
681 ADD_64(estats->pause_frames_sent_hi,
682 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
683 estats->pause_frames_sent_lo,
684 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
685}
686
687static int bnx2x_hw_stats_update(struct bnx2x *bp)
688{
689 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
690 struct nig_stats *old = &(bp->port.old_nig_stats);
691 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
692 struct bnx2x_eth_stats *estats = &bp->eth_stats;
693 struct {
694 u32 lo;
695 u32 hi;
696 } diff;
697
698 switch (bp->link_vars.mac_type) {
699 case MAC_TYPE_BMAC:
700 bnx2x_bmac_stats_update(bp);
701 break;
702
703 case MAC_TYPE_EMAC:
704 bnx2x_emac_stats_update(bp);
705 break;
706
707 case MAC_TYPE_UMAC:
708 case MAC_TYPE_XMAC:
709 bnx2x_mstat_stats_update(bp);
710 break;
711
712 case MAC_TYPE_NONE: /* unreached */
713 DP(BNX2X_MSG_STATS,
714 "stats updated by DMAE but no MAC active\n");
715 return -1;
716
717 default: /* unreached */
718 BNX2X_ERR("Unknown MAC type\n");
719 }
720
721 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
722 new->brb_discard - old->brb_discard);
723 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
724 new->brb_truncate - old->brb_truncate);
725
726 if (!CHIP_IS_E3(bp)) {
727 UPDATE_STAT64_NIG(egress_mac_pkt0,
728 etherstatspkts1024octetsto1522octets);
729 UPDATE_STAT64_NIG(egress_mac_pkt1,
730 etherstatspktsover1522octets);
731 }
732
733 memcpy(old, new, sizeof(struct nig_stats));
734
735 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
736 sizeof(struct mac_stx));
737 estats->brb_drop_hi = pstats->brb_drop_hi;
738 estats->brb_drop_lo = pstats->brb_drop_lo;
739
740 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
741
742 if (!BP_NOMCP(bp)) {
743 u32 nig_timer_max =
744 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
745 if (nig_timer_max != estats->nig_timer_max) {
746 estats->nig_timer_max = nig_timer_max;
747 BNX2X_ERR("NIG timer max (%u)\n",
748 estats->nig_timer_max);
749 }
750 }
751
752 return 0;
753}
754
755static int bnx2x_storm_stats_update(struct bnx2x *bp)
756{
757 struct tstorm_per_port_stats *tport =
758 &bp->fw_stats_data->port.tstorm_port_statistics;
759 struct tstorm_per_pf_stats *tfunc =
760 &bp->fw_stats_data->pf.tstorm_pf_statistics;
761 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
762 struct bnx2x_eth_stats *estats = &bp->eth_stats;
763 struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
764 int i;
765 u16 cur_stats_counter;
766
767 /* Make sure we use the value of the counter
768 * used for sending the last stats ramrod.
769 */
770 spin_lock_bh(&bp->stats_lock);
771 cur_stats_counter = bp->stats_counter - 1;
772 spin_unlock_bh(&bp->stats_lock);
773
774 /* are storm stats valid? */
775 if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
776 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
777 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
778 le16_to_cpu(counters->xstats_counter), bp->stats_counter);
779 return -EAGAIN;
780 }
781
782 if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
783 DP(BNX2X_MSG_STATS, "stats not updated by ustorm"
784 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
785 le16_to_cpu(counters->ustats_counter), bp->stats_counter);
786 return -EAGAIN;
787 }
788
789 if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
790 DP(BNX2X_MSG_STATS, "stats not updated by cstorm"
791 " cstorm counter (0x%x) != stats_counter (0x%x)\n",
792 le16_to_cpu(counters->cstats_counter), bp->stats_counter);
793 return -EAGAIN;
794 }
795
796 if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
797 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
798 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
799 le16_to_cpu(counters->tstats_counter), bp->stats_counter);
800 return -EAGAIN;
801 }
802
803 memcpy(&(fstats->total_bytes_received_hi),
804 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
805 sizeof(struct host_func_stats) - 2*sizeof(u32));
806 estats->error_bytes_received_hi = 0;
807 estats->error_bytes_received_lo = 0;
808 estats->etherstatsoverrsizepkts_hi = 0;
809 estats->etherstatsoverrsizepkts_lo = 0;
810 estats->no_buff_discard_hi = 0;
811 estats->no_buff_discard_lo = 0;
812 estats->total_tpa_aggregations_hi = 0;
813 estats->total_tpa_aggregations_lo = 0;
814 estats->total_tpa_aggregated_frames_hi = 0;
815 estats->total_tpa_aggregated_frames_lo = 0;
816 estats->total_tpa_bytes_hi = 0;
817 estats->total_tpa_bytes_lo = 0;
818
819 for_each_eth_queue(bp, i) {
820 struct bnx2x_fastpath *fp = &bp->fp[i];
821 struct tstorm_per_queue_stats *tclient =
822 &bp->fw_stats_data->queue_stats[i].
823 tstorm_queue_statistics;
824 struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
825 struct ustorm_per_queue_stats *uclient =
826 &bp->fw_stats_data->queue_stats[i].
827 ustorm_queue_statistics;
828 struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
829 struct xstorm_per_queue_stats *xclient =
830 &bp->fw_stats_data->queue_stats[i].
831 xstorm_queue_statistics;
832 struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
833 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
834 u32 diff;
835
836 DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, "
837 "bcast_sent 0x%x mcast_sent 0x%x\n",
838 i, xclient->ucast_pkts_sent,
839 xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);
840
841 DP(BNX2X_MSG_STATS, "---------------\n");
842
843 qstats->total_broadcast_bytes_received_hi =
844 le32_to_cpu(tclient->rcv_bcast_bytes.hi);
845 qstats->total_broadcast_bytes_received_lo =
846 le32_to_cpu(tclient->rcv_bcast_bytes.lo);
847
848 qstats->total_multicast_bytes_received_hi =
849 le32_to_cpu(tclient->rcv_mcast_bytes.hi);
850 qstats->total_multicast_bytes_received_lo =
851 le32_to_cpu(tclient->rcv_mcast_bytes.lo);
852
853 qstats->total_unicast_bytes_received_hi =
854 le32_to_cpu(tclient->rcv_ucast_bytes.hi);
855 qstats->total_unicast_bytes_received_lo =
856 le32_to_cpu(tclient->rcv_ucast_bytes.lo);
857
858		/*
859		 * Sum all unicast/multicast/broadcast bytes into
860		 * total_bytes_received.
861		 */
862 qstats->total_bytes_received_hi =
863 qstats->total_broadcast_bytes_received_hi;
864 qstats->total_bytes_received_lo =
865 qstats->total_broadcast_bytes_received_lo;
866
867 ADD_64(qstats->total_bytes_received_hi,
868 qstats->total_multicast_bytes_received_hi,
869 qstats->total_bytes_received_lo,
870 qstats->total_multicast_bytes_received_lo);
871
872 ADD_64(qstats->total_bytes_received_hi,
873 qstats->total_unicast_bytes_received_hi,
874 qstats->total_bytes_received_lo,
875 qstats->total_unicast_bytes_received_lo);
876
877 qstats->valid_bytes_received_hi =
878 qstats->total_bytes_received_hi;
879 qstats->valid_bytes_received_lo =
880 qstats->total_bytes_received_lo;
881
882
883 UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
884 total_unicast_packets_received);
885 UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
886 total_multicast_packets_received);
887 UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
888 total_broadcast_packets_received);
889 UPDATE_EXTEND_TSTAT(pkts_too_big_discard,
890 etherstatsoverrsizepkts);
891 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
892
893 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
894 total_unicast_packets_received);
895 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
896 total_multicast_packets_received);
897 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
898 total_broadcast_packets_received);
899 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
900 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
901 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
902
903 qstats->total_broadcast_bytes_transmitted_hi =
904 le32_to_cpu(xclient->bcast_bytes_sent.hi);
905 qstats->total_broadcast_bytes_transmitted_lo =
906 le32_to_cpu(xclient->bcast_bytes_sent.lo);
907
908 qstats->total_multicast_bytes_transmitted_hi =
909 le32_to_cpu(xclient->mcast_bytes_sent.hi);
910 qstats->total_multicast_bytes_transmitted_lo =
911 le32_to_cpu(xclient->mcast_bytes_sent.lo);
912
913 qstats->total_unicast_bytes_transmitted_hi =
914 le32_to_cpu(xclient->ucast_bytes_sent.hi);
915 qstats->total_unicast_bytes_transmitted_lo =
916 le32_to_cpu(xclient->ucast_bytes_sent.lo);
917		/*
918		 * Sum all unicast/multicast/broadcast bytes into
919		 * total_bytes_transmitted.
920		 */
921 qstats->total_bytes_transmitted_hi =
922 qstats->total_unicast_bytes_transmitted_hi;
923 qstats->total_bytes_transmitted_lo =
924 qstats->total_unicast_bytes_transmitted_lo;
925
926 ADD_64(qstats->total_bytes_transmitted_hi,
927 qstats->total_broadcast_bytes_transmitted_hi,
928 qstats->total_bytes_transmitted_lo,
929 qstats->total_broadcast_bytes_transmitted_lo);
930
931 ADD_64(qstats->total_bytes_transmitted_hi,
932 qstats->total_multicast_bytes_transmitted_hi,
933 qstats->total_bytes_transmitted_lo,
934 qstats->total_multicast_bytes_transmitted_lo);
935
936 UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
937 total_unicast_packets_transmitted);
938 UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
939 total_multicast_packets_transmitted);
940 UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
941 total_broadcast_packets_transmitted);
942
943 UPDATE_EXTEND_TSTAT(checksum_discard,
944 total_packets_received_checksum_discarded);
945 UPDATE_EXTEND_TSTAT(ttl0_discard,
946 total_packets_received_ttl0_discarded);
947
948 UPDATE_EXTEND_XSTAT(error_drop_pkts,
949 total_transmitted_dropped_packets_error);
950
951 /* TPA aggregations completed */
952 UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations);
953 /* Number of network frames aggregated by TPA */
954 UPDATE_EXTEND_USTAT(coalesced_pkts,
955 total_tpa_aggregated_frames);
956 /* Total number of bytes in completed TPA aggregations */
957 qstats->total_tpa_bytes_lo =
958 le32_to_cpu(uclient->coalesced_bytes.lo);
959 qstats->total_tpa_bytes_hi =
960 le32_to_cpu(uclient->coalesced_bytes.hi);
961
962 /* TPA stats per-function */
963 ADD_64(estats->total_tpa_aggregations_hi,
964 qstats->total_tpa_aggregations_hi,
965 estats->total_tpa_aggregations_lo,
966 qstats->total_tpa_aggregations_lo);
967 ADD_64(estats->total_tpa_aggregated_frames_hi,
968 qstats->total_tpa_aggregated_frames_hi,
969 estats->total_tpa_aggregated_frames_lo,
970 qstats->total_tpa_aggregated_frames_lo);
971 ADD_64(estats->total_tpa_bytes_hi,
972 qstats->total_tpa_bytes_hi,
973 estats->total_tpa_bytes_lo,
974 qstats->total_tpa_bytes_lo);
975
976 ADD_64(fstats->total_bytes_received_hi,
977 qstats->total_bytes_received_hi,
978 fstats->total_bytes_received_lo,
979 qstats->total_bytes_received_lo);
980 ADD_64(fstats->total_bytes_transmitted_hi,
981 qstats->total_bytes_transmitted_hi,
982 fstats->total_bytes_transmitted_lo,
983 qstats->total_bytes_transmitted_lo);
984 ADD_64(fstats->total_unicast_packets_received_hi,
985 qstats->total_unicast_packets_received_hi,
986 fstats->total_unicast_packets_received_lo,
987 qstats->total_unicast_packets_received_lo);
988 ADD_64(fstats->total_multicast_packets_received_hi,
989 qstats->total_multicast_packets_received_hi,
990 fstats->total_multicast_packets_received_lo,
991 qstats->total_multicast_packets_received_lo);
992 ADD_64(fstats->total_broadcast_packets_received_hi,
993 qstats->total_broadcast_packets_received_hi,
994 fstats->total_broadcast_packets_received_lo,
995 qstats->total_broadcast_packets_received_lo);
996 ADD_64(fstats->total_unicast_packets_transmitted_hi,
997 qstats->total_unicast_packets_transmitted_hi,
998 fstats->total_unicast_packets_transmitted_lo,
999 qstats->total_unicast_packets_transmitted_lo);
1000 ADD_64(fstats->total_multicast_packets_transmitted_hi,
1001 qstats->total_multicast_packets_transmitted_hi,
1002 fstats->total_multicast_packets_transmitted_lo,
1003 qstats->total_multicast_packets_transmitted_lo);
1004 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
1005 qstats->total_broadcast_packets_transmitted_hi,
1006 fstats->total_broadcast_packets_transmitted_lo,
1007 qstats->total_broadcast_packets_transmitted_lo);
1008 ADD_64(fstats->valid_bytes_received_hi,
1009 qstats->valid_bytes_received_hi,
1010 fstats->valid_bytes_received_lo,
1011 qstats->valid_bytes_received_lo);
1012
1013 ADD_64(estats->etherstatsoverrsizepkts_hi,
1014 qstats->etherstatsoverrsizepkts_hi,
1015 estats->etherstatsoverrsizepkts_lo,
1016 qstats->etherstatsoverrsizepkts_lo);
1017 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
1018 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
1019 }
1020
1021 ADD_64(fstats->total_bytes_received_hi,
1022 estats->rx_stat_ifhcinbadoctets_hi,
1023 fstats->total_bytes_received_lo,
1024 estats->rx_stat_ifhcinbadoctets_lo);
1025
1026 ADD_64(fstats->total_bytes_received_hi,
1027 tfunc->rcv_error_bytes.hi,
1028 fstats->total_bytes_received_lo,
1029 tfunc->rcv_error_bytes.lo);
1030
1031 memcpy(estats, &(fstats->total_bytes_received_hi),
1032 sizeof(struct host_func_stats) - 2*sizeof(u32));
1033
1034 ADD_64(estats->error_bytes_received_hi,
1035 tfunc->rcv_error_bytes.hi,
1036 estats->error_bytes_received_lo,
1037 tfunc->rcv_error_bytes.lo);
1038
1039 ADD_64(estats->etherstatsoverrsizepkts_hi,
1040 estats->rx_stat_dot3statsframestoolong_hi,
1041 estats->etherstatsoverrsizepkts_lo,
1042 estats->rx_stat_dot3statsframestoolong_lo);
1043 ADD_64(estats->error_bytes_received_hi,
1044 estats->rx_stat_ifhcinbadoctets_hi,
1045 estats->error_bytes_received_lo,
1046 estats->rx_stat_ifhcinbadoctets_lo);
1047
1048 if (bp->port.pmf) {
1049 estats->mac_filter_discard =
1050 le32_to_cpu(tport->mac_filter_discard);
1051 estats->mf_tag_discard =
1052 le32_to_cpu(tport->mf_tag_discard);
1053 estats->brb_truncate_discard =
1054 le32_to_cpu(tport->brb_truncate_discard);
1055 estats->mac_discard = le32_to_cpu(tport->mac_discard);
1056 }
1057
1058 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
1059
1060 bp->stats_pending = 0;
1061
1062 return 0;
1063}
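/*
 * Illustrative note (not in the original source): the counter handshake
 * above in outline, assuming no ramrod is lost:
 *
 *	driver:	hdr.drv_stats_counter = stats_counter++;  (post ramrod)
 *	storms:	echo that counter into storm_counters.*stats_counter
 *	driver:	accept the snapshot only when all four echoed values
 *		equal stats_counter - 1
 */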
1064
1065static void bnx2x_net_stats_update(struct bnx2x *bp)
1066{
1067 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1068 struct net_device_stats *nstats = &bp->dev->stats;
1069 unsigned long tmp;
1070 int i;
1071
1072 nstats->rx_packets =
1073 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
1074 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
1075 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
1076
1077 nstats->tx_packets =
1078 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
1079 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
1080 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
1081
1082 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
1083
1084 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
1085
1086 tmp = estats->mac_discard;
1087 for_each_rx_queue(bp, i)
1088 tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
1089 nstats->rx_dropped = tmp;
1090
1091 nstats->tx_dropped = 0;
1092
1093 nstats->multicast =
1094 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
1095
1096 nstats->collisions =
1097 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
1098
1099 nstats->rx_length_errors =
1100 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
1101 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
1102 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
1103 bnx2x_hilo(&estats->brb_truncate_hi);
1104 nstats->rx_crc_errors =
1105 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
1106 nstats->rx_frame_errors =
1107 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
1108 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
1109 nstats->rx_missed_errors = 0;
1110
1111 nstats->rx_errors = nstats->rx_length_errors +
1112 nstats->rx_over_errors +
1113 nstats->rx_crc_errors +
1114 nstats->rx_frame_errors +
1115 nstats->rx_fifo_errors +
1116 nstats->rx_missed_errors;
1117
1118 nstats->tx_aborted_errors =
1119 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
1120 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
1121 nstats->tx_carrier_errors =
1122 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
1123 nstats->tx_fifo_errors = 0;
1124 nstats->tx_heartbeat_errors = 0;
1125 nstats->tx_window_errors = 0;
1126
1127 nstats->tx_errors = nstats->tx_aborted_errors +
1128 nstats->tx_carrier_errors +
1129 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
1130}
1131
1132static void bnx2x_drv_stats_update(struct bnx2x *bp)
1133{
1134 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1135 int i;
1136
1137 estats->driver_xoff = 0;
1138 estats->rx_err_discard_pkt = 0;
1139 estats->rx_skb_alloc_failed = 0;
1140 estats->hw_csum_err = 0;
1141 for_each_queue(bp, i) {
1142 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
1143
1144 estats->driver_xoff += qstats->driver_xoff;
1145 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
1146 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
1147 estats->hw_csum_err += qstats->hw_csum_err;
1148 }
1149}
1150
1151static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
1152{
1153 u32 val;
1154
1155 if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
1156 val = SHMEM2_RD(bp, edebug_driver_if[1]);
1157
1158 if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
1159 return true;
1160 }
1161
1162 return false;
1163}
1164
1165static void bnx2x_stats_update(struct bnx2x *bp)
1166{
1167 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1168
1169 if (bnx2x_edebug_stats_stopped(bp))
1170 return;
1171
1172 if (*stats_comp != DMAE_COMP_VAL)
1173 return;
1174
1175 if (bp->port.pmf)
1176 bnx2x_hw_stats_update(bp);
1177
1178 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
1179 BNX2X_ERR("storm stats were not updated for 3 times\n");
1180 bnx2x_panic();
1181 return;
1182 }
1183
1184 bnx2x_net_stats_update(bp);
1185 bnx2x_drv_stats_update(bp);
1186
1187 if (netif_msg_timer(bp)) {
1188 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1189 int i, cos;
1190
1191 netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
1192 estats->brb_drop_lo, estats->brb_truncate_lo);
1193
1194 for_each_eth_queue(bp, i) {
1195 struct bnx2x_fastpath *fp = &bp->fp[i];
1196 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1197
1198 printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
1199 " rx pkt(%lu) rx calls(%lu %lu)\n",
1200 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
1201 fp->rx_comp_cons),
1202 le16_to_cpu(*fp->rx_cons_sb),
1203 bnx2x_hilo(&qstats->
1204 total_unicast_packets_received_hi),
1205 fp->rx_calls, fp->rx_pkt);
1206 }
1207
1208 for_each_eth_queue(bp, i) {
1209 struct bnx2x_fastpath *fp = &bp->fp[i];
1210 struct bnx2x_fp_txdata *txdata;
1211 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1212 struct netdev_queue *txq;
1213
1214 printk(KERN_DEBUG "%s: tx pkt(%lu) (Xoff events %u)",
1215 fp->name, bnx2x_hilo(
1216 &qstats->total_unicast_packets_transmitted_hi),
1217 qstats->driver_xoff);
1218
1219 for_each_cos_in_tx_queue(fp, cos) {
1220 txdata = &fp->txdata[cos];
1221 txq = netdev_get_tx_queue(bp->dev,
1222 FP_COS_TO_TXQ(fp, cos));
1223
1224 printk(KERN_DEBUG "%d: tx avail(%4u)"
1225 " *tx_cons_sb(%u)"
1226 " tx calls (%lu)"
1227 " %s\n",
1228 cos,
1229 bnx2x_tx_avail(bp, txdata),
1230 le16_to_cpu(*txdata->tx_cons_sb),
1231 txdata->tx_pkt,
1232 (netif_tx_queue_stopped(txq) ?
1233 "Xoff" : "Xon")
1234 );
1235 }
1236 }
1237 }
1238
1239 bnx2x_hw_stats_post(bp);
1240 bnx2x_storm_stats_post(bp);
1241}
1242
1243static void bnx2x_port_stats_stop(struct bnx2x *bp)
1244{
1245 struct dmae_command *dmae;
1246 u32 opcode;
1247 int loader_idx = PMF_DMAE_C(bp);
1248 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1249
1250 bp->executer_idx = 0;
1251
1252 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
1253
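	/* One DMAE chain flushes whatever stats areas this function owns:
	 * when both port and function areas exist, the first command
	 * completes to GRC (the loader's "go" register) to trigger the next
	 * one, and the final command always completes to the stats_comp
	 * word in host memory, which bnx2x_stats_comp() polls.
	 */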
1254 if (bp->port.port_stx) {
1255
1256 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1257 if (bp->func_stx)
1258 dmae->opcode = bnx2x_dmae_opcode_add_comp(
1259 opcode, DMAE_COMP_GRC);
1260 else
1261 dmae->opcode = bnx2x_dmae_opcode_add_comp(
1262 opcode, DMAE_COMP_PCI);
1263
1264 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1265 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1266 dmae->dst_addr_lo = bp->port.port_stx >> 2;
1267 dmae->dst_addr_hi = 0;
1268 dmae->len = sizeof(struct host_port_stats) >> 2;
1269 if (bp->func_stx) {
1270 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
1271 dmae->comp_addr_hi = 0;
1272 dmae->comp_val = 1;
1273 } else {
1274 dmae->comp_addr_lo =
1275 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1276 dmae->comp_addr_hi =
1277 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1278 dmae->comp_val = DMAE_COMP_VAL;
1279
1280 *stats_comp = 0;
1281 }
1282 }
1283
1284 if (bp->func_stx) {
1285
1286 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1287 dmae->opcode =
1288 bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1289 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
1290 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
1291 dmae->dst_addr_lo = bp->func_stx >> 2;
1292 dmae->dst_addr_hi = 0;
1293 dmae->len = sizeof(struct host_func_stats) >> 2;
1294 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1295 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1296 dmae->comp_val = DMAE_COMP_VAL;
1297
1298 *stats_comp = 0;
1299 }
1300}
1301
1302static void bnx2x_stats_stop(struct bnx2x *bp)
1303{
1304 int update = 0;
1305
1306 bnx2x_stats_comp(bp);
1307
1308 if (bp->port.pmf)
1309 update = (bnx2x_hw_stats_update(bp) == 0);
1310
1311 update |= (bnx2x_storm_stats_update(bp) == 0);
1312
1313 if (update) {
1314 bnx2x_net_stats_update(bp);
1315
1316 if (bp->port.pmf)
1317 bnx2x_port_stats_stop(bp);
1318
1319 bnx2x_hw_stats_post(bp);
1320 bnx2x_stats_comp(bp);
1321 }
1322}
1323
1324static void bnx2x_stats_do_nothing(struct bnx2x *bp)
1325{
1326}
1327
1328static const struct {
1329 void (*action)(struct bnx2x *bp);
1330 enum bnx2x_stats_state next_state;
1331} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
1332/* state event */
1333{
1334/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
1335/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
1336/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
1337/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
1338},
1339{
1340/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
1341/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
1342/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
1343/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
1344}
1345};
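/* The table above is indexed [current state][event]; each cell gives the
 * handler to run and the state to enter next.  E.g. an UPDATE event in
 * STATS_STATE_ENABLED runs bnx2x_stats_update() and stays ENABLED, while
 * a STOP event runs bnx2x_stats_stop() and disables statistics until the
 * next LINK_UP event starts them again.
 */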
1346
1347void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1348{
1349 enum bnx2x_stats_state state;
1350 if (unlikely(bp->panic))
1351 return;
1352 bnx2x_stats_stm[bp->stats_state][event].action(bp);
1353 spin_lock_bh(&bp->stats_lock);
1354 state = bp->stats_state;
1355 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1356 spin_unlock_bh(&bp->stats_lock);
1357
1358 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1359 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
1360 state, event, bp->stats_state);
1361}
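/* For orientation, typical call sites elsewhere in the driver (a sketch,
 * not an exhaustive list): the periodic timer issues STATS_EVENT_UPDATE,
 * link-state changes issue STATS_EVENT_LINK_UP, and nic unload issues
 * STATS_EVENT_STOP.
 */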
1362
1363static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1364{
1365 struct dmae_command *dmae;
1366 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1367
1368 /* sanity */
1369 if (!bp->port.pmf || !bp->port.port_stx) {
1370 BNX2X_ERR("BUG!\n");
1371 return;
1372 }
1373
1374 bp->executer_idx = 0;
1375
1376 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1377 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
1378 true, DMAE_COMP_PCI);
1379 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1380 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1381 dmae->dst_addr_lo = bp->port.port_stx >> 2;
1382 dmae->dst_addr_hi = 0;
1383 dmae->len = sizeof(struct host_port_stats) >> 2;
1384 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1385 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1386 dmae->comp_val = DMAE_COMP_VAL;
1387
1388 *stats_comp = 0;
1389 bnx2x_hw_stats_post(bp);
1390 bnx2x_stats_comp(bp);
1391}
1392
1393static void bnx2x_func_stats_base_init(struct bnx2x *bp)
1394{
1395 int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
1396 u32 func_stx;
1397
1398 /* sanity */
1399 if (!bp->port.pmf || !bp->func_stx) {
1400 BNX2X_ERR("BUG!\n");
1401 return;
1402 }
1403
1404 /* save our func_stx */
1405 func_stx = bp->func_stx;
1406
1407 for (vn = VN_0; vn < vn_max; vn++) {
1408 int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
1409
1410 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
1411 bnx2x_func_stats_init(bp);
1412 bnx2x_hw_stats_post(bp);
1413 bnx2x_stats_comp(bp);
1414 }
1415
1416 /* restore our func_stx */
1417 bp->func_stx = func_stx;
1418}
1419
1420static void bnx2x_func_stats_base_update(struct bnx2x *bp)
1421{
1422 struct dmae_command *dmae = &bp->stats_dmae;
1423 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1424
1425 /* sanity */
1426 if (!bp->func_stx) {
1427 BNX2X_ERR("BUG!\n");
1428 return;
1429 }
1430
1431 bp->executer_idx = 0;
1432 memset(dmae, 0, sizeof(struct dmae_command));
1433
1434 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
1435 true, DMAE_COMP_PCI);
1436 dmae->src_addr_lo = bp->func_stx >> 2;
1437 dmae->src_addr_hi = 0;
1438 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
1439 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
1440 dmae->len = sizeof(struct host_func_stats) >> 2;
1441 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1442 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1443 dmae->comp_val = DMAE_COMP_VAL;
1444
1445 *stats_comp = 0;
1446 bnx2x_hw_stats_post(bp);
1447 bnx2x_stats_comp(bp);
1448}
1449
1450/**
1451 * bnx2x_prep_fw_stats_req - prepare the statistics ramrod data
1452 * @bp:	driver handle
1453 *
1454 * Prepare the ramrod data once; afterwards each request only has to
1455 * increment the statistics counter and send the ramrod.
1456 */
1457static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
1458{
1459 int i;
1460 struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
1461
1462 dma_addr_t cur_data_offset;
1463 struct stats_query_entry *cur_query_entry;
1464
1465 stats_hdr->cmd_num = bp->fw_stats_num;
1466 stats_hdr->drv_stats_counter = 0;
1467
1468	/* The storm_counters struct holds the number of completed
1469	 * statistics requests per storm; FW increments a counter each
1470	 * time it finishes handling a statistics ramrod.  We check these
1471	 * counters in the timer handler so that a stale (statistics)
1472	 * ramrod completion can be discarded.
1473	 */
1474 cur_data_offset = bp->fw_stats_data_mapping +
1475 offsetof(struct bnx2x_fw_stats_data, storm_counters);
1476
1477 stats_hdr->stats_counters_addrs.hi =
1478 cpu_to_le32(U64_HI(cur_data_offset));
1479 stats_hdr->stats_counters_addrs.lo =
1480 cpu_to_le32(U64_LO(cur_data_offset));
1481
1482	/* prepare for the first stats ramrod (it will complete with
1483	 * all counters equal to zero) - init the counters to something else.
1484	 */
1485 memset(&bp->fw_stats_data->storm_counters, 0xff,
1486 sizeof(struct stats_counter));
1487
1488 /**** Port FW statistics data ****/
1489 cur_data_offset = bp->fw_stats_data_mapping +
1490 offsetof(struct bnx2x_fw_stats_data, port);
1491
1492 cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
1493
1494 cur_query_entry->kind = STATS_TYPE_PORT;
1495	/* For a port query, index is a DONT CARE */
1496 cur_query_entry->index = BP_PORT(bp);
1497	/* For a port query, funcID is a DONT CARE */
1498 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
1499 cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
1500 cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
1501
1502 /**** PF FW statistics data ****/
1503 cur_data_offset = bp->fw_stats_data_mapping +
1504 offsetof(struct bnx2x_fw_stats_data, pf);
1505
1506 cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
1507
1508 cur_query_entry->kind = STATS_TYPE_PF;
1509	/* For a PF query, index is a DONT CARE */
1510 cur_query_entry->index = BP_PORT(bp);
1511 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
1512 cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
1513 cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
1514
1515 /**** Clients' queries ****/
1516 cur_data_offset = bp->fw_stats_data_mapping +
1517 offsetof(struct bnx2x_fw_stats_data, queue_stats);
1518
1519 for_each_eth_queue(bp, i) {
1520 cur_query_entry =
1521 &bp->fw_stats_req->
1522 query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
1523
1524 cur_query_entry->kind = STATS_TYPE_QUEUE;
1525 cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
1526 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
1527 cur_query_entry->address.hi =
1528 cpu_to_le32(U64_HI(cur_data_offset));
1529 cur_query_entry->address.lo =
1530 cpu_to_le32(U64_LO(cur_data_offset));
1531
1532 cur_data_offset += sizeof(struct per_queue_stats);
1533 }
1534}
1535
1536void bnx2x_stats_init(struct bnx2x *bp)
1537{
1538	int port = BP_PORT(bp);
1539 int mb_idx = BP_FW_MB_IDX(bp);
1540 int i;
1541
1542 bp->stats_pending = 0;
1543 bp->executer_idx = 0;
1544 bp->stats_counter = 0;
1545
1546 /* port and func stats for management */
1547 if (!BP_NOMCP(bp)) {
1548 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
1549 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
1550
1551 } else {
1552 bp->port.port_stx = 0;
1553 bp->func_stx = 0;
1554 }
1555 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
1556 bp->port.port_stx, bp->func_stx);
1557
1558
1559 /* port stats */
1560 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
1561 bp->port.old_nig_stats.brb_discard =
1562 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
1563 bp->port.old_nig_stats.brb_truncate =
1564 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
1565 if (!CHIP_IS_E3(bp)) {
1566 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
1567 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
1568 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
1569 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1570 }
1571
1572 /* function stats */
1573 for_each_queue(bp, i) {
1574 struct bnx2x_fastpath *fp = &bp->fp[i];
1575
1576 memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
1577 memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
1578 memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
1579 memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
1580 }
1581
1582 /* Prepare statistics ramrod data */
1583 bnx2x_prep_fw_stats_req(bp);
1584
1585 memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
1586 memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
1587
1588 bp->stats_state = STATS_STATE_DISABLED;
1589
1590 if (bp->port.pmf) {
1591 if (bp->port.port_stx)
1592 bnx2x_port_stats_base_init(bp);
1593
1594 if (bp->func_stx)
1595 bnx2x_func_stats_base_init(bp);
1596
1597 } else if (bp->func_stx)
1598 bnx2x_func_stats_base_update(bp);
1599}
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
new file mode 100644
index 00000000000..5d8ce2f6afe
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -0,0 +1,381 @@
1/* bnx2x_stats.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17#ifndef BNX2X_STATS_H
18#define BNX2X_STATS_H
19
20#include <linux/types.h>
21
22struct nig_stats {
23 u32 brb_discard;
24 u32 brb_packet;
25 u32 brb_truncate;
26 u32 flow_ctrl_discard;
27 u32 flow_ctrl_octets;
28 u32 flow_ctrl_packet;
29 u32 mng_discard;
30 u32 mng_octet_inp;
31 u32 mng_octet_out;
32 u32 mng_packet_inp;
33 u32 mng_packet_out;
34 u32 pbf_octets;
35 u32 pbf_packet;
36 u32 safc_inp;
37 u32 egress_mac_pkt0_lo;
38 u32 egress_mac_pkt0_hi;
39 u32 egress_mac_pkt1_lo;
40 u32 egress_mac_pkt1_hi;
41};
42
43
44enum bnx2x_stats_event {
45 STATS_EVENT_PMF = 0,
46 STATS_EVENT_LINK_UP,
47 STATS_EVENT_UPDATE,
48 STATS_EVENT_STOP,
49 STATS_EVENT_MAX
50};
51
52enum bnx2x_stats_state {
53 STATS_STATE_DISABLED = 0,
54 STATS_STATE_ENABLED,
55 STATS_STATE_MAX
56};
57
58struct bnx2x_eth_stats {
59 u32 total_bytes_received_hi;
60 u32 total_bytes_received_lo;
61 u32 total_bytes_transmitted_hi;
62 u32 total_bytes_transmitted_lo;
63 u32 total_unicast_packets_received_hi;
64 u32 total_unicast_packets_received_lo;
65 u32 total_multicast_packets_received_hi;
66 u32 total_multicast_packets_received_lo;
67 u32 total_broadcast_packets_received_hi;
68 u32 total_broadcast_packets_received_lo;
69 u32 total_unicast_packets_transmitted_hi;
70 u32 total_unicast_packets_transmitted_lo;
71 u32 total_multicast_packets_transmitted_hi;
72 u32 total_multicast_packets_transmitted_lo;
73 u32 total_broadcast_packets_transmitted_hi;
74 u32 total_broadcast_packets_transmitted_lo;
75 u32 valid_bytes_received_hi;
76 u32 valid_bytes_received_lo;
77
78 u32 error_bytes_received_hi;
79 u32 error_bytes_received_lo;
80 u32 etherstatsoverrsizepkts_hi;
81 u32 etherstatsoverrsizepkts_lo;
82 u32 no_buff_discard_hi;
83 u32 no_buff_discard_lo;
84
85 u32 rx_stat_ifhcinbadoctets_hi;
86 u32 rx_stat_ifhcinbadoctets_lo;
87 u32 tx_stat_ifhcoutbadoctets_hi;
88 u32 tx_stat_ifhcoutbadoctets_lo;
89 u32 rx_stat_dot3statsfcserrors_hi;
90 u32 rx_stat_dot3statsfcserrors_lo;
91 u32 rx_stat_dot3statsalignmenterrors_hi;
92 u32 rx_stat_dot3statsalignmenterrors_lo;
93 u32 rx_stat_dot3statscarriersenseerrors_hi;
94 u32 rx_stat_dot3statscarriersenseerrors_lo;
95 u32 rx_stat_falsecarriererrors_hi;
96 u32 rx_stat_falsecarriererrors_lo;
97 u32 rx_stat_etherstatsundersizepkts_hi;
98 u32 rx_stat_etherstatsundersizepkts_lo;
99 u32 rx_stat_dot3statsframestoolong_hi;
100 u32 rx_stat_dot3statsframestoolong_lo;
101 u32 rx_stat_etherstatsfragments_hi;
102 u32 rx_stat_etherstatsfragments_lo;
103 u32 rx_stat_etherstatsjabbers_hi;
104 u32 rx_stat_etherstatsjabbers_lo;
105 u32 rx_stat_maccontrolframesreceived_hi;
106 u32 rx_stat_maccontrolframesreceived_lo;
107 u32 rx_stat_bmac_xpf_hi;
108 u32 rx_stat_bmac_xpf_lo;
109 u32 rx_stat_bmac_xcf_hi;
110 u32 rx_stat_bmac_xcf_lo;
111 u32 rx_stat_xoffstateentered_hi;
112 u32 rx_stat_xoffstateentered_lo;
113 u32 rx_stat_xonpauseframesreceived_hi;
114 u32 rx_stat_xonpauseframesreceived_lo;
115 u32 rx_stat_xoffpauseframesreceived_hi;
116 u32 rx_stat_xoffpauseframesreceived_lo;
117 u32 tx_stat_outxonsent_hi;
118 u32 tx_stat_outxonsent_lo;
119 u32 tx_stat_outxoffsent_hi;
120 u32 tx_stat_outxoffsent_lo;
121 u32 tx_stat_flowcontroldone_hi;
122 u32 tx_stat_flowcontroldone_lo;
123 u32 tx_stat_etherstatscollisions_hi;
124 u32 tx_stat_etherstatscollisions_lo;
125 u32 tx_stat_dot3statssinglecollisionframes_hi;
126 u32 tx_stat_dot3statssinglecollisionframes_lo;
127 u32 tx_stat_dot3statsmultiplecollisionframes_hi;
128 u32 tx_stat_dot3statsmultiplecollisionframes_lo;
129 u32 tx_stat_dot3statsdeferredtransmissions_hi;
130 u32 tx_stat_dot3statsdeferredtransmissions_lo;
131 u32 tx_stat_dot3statsexcessivecollisions_hi;
132 u32 tx_stat_dot3statsexcessivecollisions_lo;
133 u32 tx_stat_dot3statslatecollisions_hi;
134 u32 tx_stat_dot3statslatecollisions_lo;
135 u32 tx_stat_etherstatspkts64octets_hi;
136 u32 tx_stat_etherstatspkts64octets_lo;
137 u32 tx_stat_etherstatspkts65octetsto127octets_hi;
138 u32 tx_stat_etherstatspkts65octetsto127octets_lo;
139 u32 tx_stat_etherstatspkts128octetsto255octets_hi;
140 u32 tx_stat_etherstatspkts128octetsto255octets_lo;
141 u32 tx_stat_etherstatspkts256octetsto511octets_hi;
142 u32 tx_stat_etherstatspkts256octetsto511octets_lo;
143 u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
144 u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
145 u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
146 u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
147 u32 tx_stat_etherstatspktsover1522octets_hi;
148 u32 tx_stat_etherstatspktsover1522octets_lo;
149 u32 tx_stat_bmac_2047_hi;
150 u32 tx_stat_bmac_2047_lo;
151 u32 tx_stat_bmac_4095_hi;
152 u32 tx_stat_bmac_4095_lo;
153 u32 tx_stat_bmac_9216_hi;
154 u32 tx_stat_bmac_9216_lo;
155 u32 tx_stat_bmac_16383_hi;
156 u32 tx_stat_bmac_16383_lo;
157 u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
158 u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
159 u32 tx_stat_bmac_ufl_hi;
160 u32 tx_stat_bmac_ufl_lo;
161
162 u32 pause_frames_received_hi;
163 u32 pause_frames_received_lo;
164 u32 pause_frames_sent_hi;
165 u32 pause_frames_sent_lo;
166
167 u32 etherstatspkts1024octetsto1522octets_hi;
168 u32 etherstatspkts1024octetsto1522octets_lo;
169 u32 etherstatspktsover1522octets_hi;
170 u32 etherstatspktsover1522octets_lo;
171
172 u32 brb_drop_hi;
173 u32 brb_drop_lo;
174 u32 brb_truncate_hi;
175 u32 brb_truncate_lo;
176
177 u32 mac_filter_discard;
178 u32 mf_tag_discard;
179 u32 brb_truncate_discard;
180 u32 mac_discard;
181
182 u32 driver_xoff;
183 u32 rx_err_discard_pkt;
184 u32 rx_skb_alloc_failed;
185 u32 hw_csum_err;
186
187 u32 nig_timer_max;
188
189 /* TPA */
190 u32 total_tpa_aggregations_hi;
191 u32 total_tpa_aggregations_lo;
192 u32 total_tpa_aggregated_frames_hi;
193 u32 total_tpa_aggregated_frames_lo;
194 u32 total_tpa_bytes_hi;
195 u32 total_tpa_bytes_lo;
196};
197
198
199struct bnx2x_eth_q_stats {
200 u32 total_unicast_bytes_received_hi;
201 u32 total_unicast_bytes_received_lo;
202 u32 total_broadcast_bytes_received_hi;
203 u32 total_broadcast_bytes_received_lo;
204 u32 total_multicast_bytes_received_hi;
205 u32 total_multicast_bytes_received_lo;
206 u32 total_bytes_received_hi;
207 u32 total_bytes_received_lo;
208 u32 total_unicast_bytes_transmitted_hi;
209 u32 total_unicast_bytes_transmitted_lo;
210 u32 total_broadcast_bytes_transmitted_hi;
211 u32 total_broadcast_bytes_transmitted_lo;
212 u32 total_multicast_bytes_transmitted_hi;
213 u32 total_multicast_bytes_transmitted_lo;
214 u32 total_bytes_transmitted_hi;
215 u32 total_bytes_transmitted_lo;
216 u32 total_unicast_packets_received_hi;
217 u32 total_unicast_packets_received_lo;
218 u32 total_multicast_packets_received_hi;
219 u32 total_multicast_packets_received_lo;
220 u32 total_broadcast_packets_received_hi;
221 u32 total_broadcast_packets_received_lo;
222 u32 total_unicast_packets_transmitted_hi;
223 u32 total_unicast_packets_transmitted_lo;
224 u32 total_multicast_packets_transmitted_hi;
225 u32 total_multicast_packets_transmitted_lo;
226 u32 total_broadcast_packets_transmitted_hi;
227 u32 total_broadcast_packets_transmitted_lo;
228 u32 valid_bytes_received_hi;
229 u32 valid_bytes_received_lo;
230
231 u32 etherstatsoverrsizepkts_hi;
232 u32 etherstatsoverrsizepkts_lo;
233 u32 no_buff_discard_hi;
234 u32 no_buff_discard_lo;
235
236 u32 driver_xoff;
237 u32 rx_err_discard_pkt;
238 u32 rx_skb_alloc_failed;
239 u32 hw_csum_err;
240
241 u32 total_packets_received_checksum_discarded_hi;
242 u32 total_packets_received_checksum_discarded_lo;
243 u32 total_packets_received_ttl0_discarded_hi;
244 u32 total_packets_received_ttl0_discarded_lo;
245 u32 total_transmitted_dropped_packets_error_hi;
246 u32 total_transmitted_dropped_packets_error_lo;
247
248 /* TPA */
249 u32 total_tpa_aggregations_hi;
250 u32 total_tpa_aggregations_lo;
251 u32 total_tpa_aggregated_frames_hi;
252 u32 total_tpa_aggregated_frames_lo;
253 u32 total_tpa_bytes_hi;
254 u32 total_tpa_bytes_lo;
255};
256
257/****************************************************************************
258* Macros
259****************************************************************************/
260
261/* sum[hi:lo] += add[hi:lo] */
262#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
263 do { \
264 s_lo += a_lo; \
265 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
266 } while (0)
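/* 64-bit counters are kept as {hi,lo} u32 pairs.  ADD_64 adds the lo
 * words first; the unsigned compare (s_lo < a_lo) detects wraparound and
 * carries 1 into the hi word.  Worked example: with s = 0x1_ffffffff and
 * a = 0x0_00000001, s_lo wraps to 0, the compare is true, and s becomes
 * 0x2_00000000.
 */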
267
268/* difference = minuend - subtrahend */
269#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
270 do { \
271 if (m_lo < s_lo) { \
272 /* underflow */ \
273 d_hi = m_hi - s_hi; \
274 if (d_hi > 0) { \
275			/* we can borrow 1 from the hi word */ \
276 d_hi--; \
277 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
278 } else { \
279 /* m_hi <= s_hi */ \
280 d_hi = 0; \
281 d_lo = 0; \
282 } \
283 } else { \
284 /* m_lo >= s_lo */ \
285 if (m_hi < s_hi) { \
286 d_hi = 0; \
287 d_lo = 0; \
288 } else { \
289 /* m_hi >= s_hi */ \
290 d_hi = m_hi - s_hi; \
291 d_lo = m_lo - s_lo; \
292 } \
293 } \
294 } while (0)
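/* DIFF_64 subtracts with an explicit borrow: if the minuend's lo word is
 * smaller, 1 is borrowed from the hi word (d_lo = m_lo + (UINT_MAX -
 * s_lo) + 1 is the wrapped difference).  On a true underflow the result
 * is clamped to 0 rather than wrapping, presumably because the inputs
 * are monotonic counters.  E.g. 0x2_00000000 - 0x1_ffffffff borrows and
 * yields 0x0_00000001.
 */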
295
296#define UPDATE_STAT64(s, t) \
297 do { \
298 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
299 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
300 pstats->mac_stx[0].t##_hi = new->s##_hi; \
301 pstats->mac_stx[0].t##_lo = new->s##_lo; \
302 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
303 pstats->mac_stx[1].t##_lo, diff.lo); \
304 } while (0)
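/* UPDATE_STAT64 keeps two copies of each MAC statistic: mac_stx[0] holds
 * the last raw hardware snapshot, used only to compute the delta since
 * the previous read, and mac_stx[1] is the running 64-bit total that the
 * delta is folded into.
 */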
305
306#define UPDATE_STAT64_NIG(s, t) \
307 do { \
308 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
309 diff.lo, new->s##_lo, old->s##_lo); \
310 ADD_64(estats->t##_hi, diff.hi, \
311 estats->t##_lo, diff.lo); \
312 } while (0)
313
314/* sum[hi:lo] += add */
315#define ADD_EXTEND_64(s_hi, s_lo, a) \
316 do { \
317 s_lo += a; \
318 s_hi += (s_lo < a) ? 1 : 0; \
319 } while (0)
320
321#define ADD_STAT64(diff, t) \
322 do { \
323 ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
324 pstats->mac_stx[1].t##_lo, new->diff##_lo); \
325 } while (0)
326
327#define UPDATE_EXTEND_STAT(s) \
328 do { \
329 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
330 pstats->mac_stx[1].s##_lo, \
331 new->s); \
332 } while (0)
333
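/* The UPDATE_EXTEND_{T,U,X}STAT macros below share one pattern: diff the
 * current storm counter against the saved old_*client snapshot (plain
 * u32 arithmetic handles a single wraparound correctly), refresh the
 * snapshot, and fold the 32-bit delta into the 64-bit qstats accumulator
 * via ADD_EXTEND_64.
 */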
334#define UPDATE_EXTEND_TSTAT(s, t) \
335 do { \
336 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
337 old_tclient->s = tclient->s; \
338 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
339 } while (0)
340
341#define UPDATE_EXTEND_USTAT(s, t) \
342 do { \
343 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
344 old_uclient->s = uclient->s; \
345 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
346 } while (0)
347
348#define UPDATE_EXTEND_XSTAT(s, t) \
349 do { \
350 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
351 old_xclient->s = xclient->s; \
352 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
353 } while (0)
354
355/* minuend -= subtrahend */
356#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
357 do { \
358 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
359 } while (0)
360
361/* minuend[hi:lo] -= subtrahend */
362#define SUB_EXTEND_64(m_hi, m_lo, s) \
363 do { \
364 SUB_64(m_hi, 0, m_lo, s); \
365 } while (0)
366
367#define SUB_EXTEND_USTAT(s, t) \
368 do { \
369 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
370 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
371 } while (0)
372
373
374/* forward */
375struct bnx2x;
376
377void bnx2x_stats_init(struct bnx2x *bp);
378
379void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
380
381#endif /* BNX2X_STATS_H */