Diffstat (limited to 'drivers/net/bnx2x')
-rw-r--r--  drivers/net/bnx2x/Makefile                 7
-rw-r--r--  drivers/net/bnx2x/bnx2x.h               1197
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c           2251
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h            652
-rw-r--r--  drivers/net/bnx2x/bnx2x_dump.h           534
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c       1971
-rw-r--r--  drivers/net/bnx2x/bnx2x_fw_defs.h        594
-rw-r--r--  drivers/net/bnx2x/bnx2x_fw_file_hdr.h     37
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h           3138
-rw-r--r--  drivers/net/bnx2x/bnx2x_init.h           152
-rw-r--r--  drivers/net/bnx2x/bnx2x_init_ops.h       506
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c          6735
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.h           206
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c          8040
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h           5364
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c         1411
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.h          239
17 files changed, 33034 insertions, 0 deletions
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
new file mode 100644
index 00000000000..084afce89ae
--- /dev/null
+++ b/drivers/net/bnx2x/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for Broadcom 10-Gigabit ethernet driver
3#
4
5obj-$(CONFIG_BNX2X) += bnx2x.o
6
7bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
new file mode 100644
index 00000000000..53af9c93e75
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -0,0 +1,1197 @@
1/* bnx2x.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 */
13
14#ifndef BNX2X_H
15#define BNX2X_H
16
17/* compilation time flags */
18
19/* define this to make the driver freeze on error to allow getting debug info
20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */
22
23#define DRV_MODULE_VERSION "1.52.53-3"
24#define DRV_MODULE_RELDATE "2010/18/04"
25#define BNX2X_BC_VER 0x040200
26
27#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
28#define BCM_VLAN 1
29#endif
30
31#define BNX2X_MULTI_QUEUE
32
33#define BNX2X_NEW_NAPI
34
35
36
37#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
38#define BCM_CNIC 1
39#include "../cnic_if.h"
40#endif
41
42
43#ifdef BCM_CNIC
44#define BNX2X_MIN_MSIX_VEC_CNT 3
45#define BNX2X_MSIX_VEC_FP_START 2
46#else
47#define BNX2X_MIN_MSIX_VEC_CNT 2
48#define BNX2X_MSIX_VEC_FP_START 1
49#endif
50
51#include <linux/mdio.h>
52#include <linux/pci.h>
53#include "bnx2x_reg.h"
54#include "bnx2x_fw_defs.h"
55#include "bnx2x_hsi.h"
56#include "bnx2x_link.h"
57#include "bnx2x_stats.h"
58
59/* error/debug prints */
60
61#define DRV_MODULE_NAME "bnx2x"
62
63/* for messages that are currently off */
64#define BNX2X_MSG_OFF 0
65#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */
66#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */
67#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */
68#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */
69#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */
70#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */
71
72#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */
73
74/* regular debug print */
75#define DP(__mask, __fmt, __args...) \
76do { \
77 if (bp->msg_enable & (__mask)) \
78 printk(DP_LEVEL "[%s:%d(%s)]" __fmt, \
79 __func__, __LINE__, \
80 bp->dev ? (bp->dev->name) : "?", \
81 ##__args); \
82} while (0)
83
84/* errors debug print */
85#define BNX2X_DBG_ERR(__fmt, __args...) \
86do { \
87 if (netif_msg_probe(bp)) \
88 pr_err("[%s:%d(%s)]" __fmt, \
89 __func__, __LINE__, \
90 bp->dev ? (bp->dev->name) : "?", \
91 ##__args); \
92} while (0)
93
94/* for errors (never masked) */
95#define BNX2X_ERR(__fmt, __args...) \
96do { \
97 pr_err("[%s:%d(%s)]" __fmt, \
98 __func__, __LINE__, \
99 bp->dev ? (bp->dev->name) : "?", \
100 ##__args); \
101 } while (0)
102
103#define BNX2X_ERROR(__fmt, __args...) do { \
104 pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \
105 } while (0)
106
107
108/* before we have a dev->name use dev_info() */
109#define BNX2X_DEV_INFO(__fmt, __args...) \
110do { \
111 if (netif_msg_probe(bp)) \
112 dev_info(&bp->pdev->dev, __fmt, ##__args); \
113} while (0)
114
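/*
 * Illustrative usage (not part of the driver sources): every macro above
 * expects a local 'struct bnx2x *bp' to be in scope, since they all
 * dereference bp->msg_enable and bp->dev, e.g.
 *
 *	DP(BNX2X_MSG_SP, "slow path event, state 0x%x\n", bp->state);
 *	BNX2X_DBG_ERR("bad value %d\n", val);	('val' is just a placeholder)
 *	BNX2X_ERR("driver assert\n");
 *
 * DP() prints only when the given mask bit is set in bp->msg_enable,
 * BNX2X_DBG_ERR() only when netif_msg_probe(bp) is true, and BNX2X_ERR()
 * is never masked.
 */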
115void bnx2x_panic_dump(struct bnx2x *bp);
116
117#ifdef BNX2X_STOP_ON_ERROR
118#define bnx2x_panic() do { \
119 bp->panic = 1; \
120 BNX2X_ERR("driver assert\n"); \
121 bnx2x_int_disable(bp); \
122 bnx2x_panic_dump(bp); \
123 } while (0)
124#else
125#define bnx2x_panic() do { \
126 bp->panic = 1; \
127 BNX2X_ERR("driver assert\n"); \
128 bnx2x_panic_dump(bp); \
129 } while (0)
130#endif
131
132
133#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
134#define U64_HI(x) (u32)(((u64)(x)) >> 32)
135#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
136
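/*
 * Illustrative helper (not a driver function): the U64_HI()/U64_LO() pair
 * above is how a 64-bit DMA address is split into the two 32-bit address
 * words of a buffer descriptor; the same pattern is used by
 * bnx2x_tpa_start() in bnx2x_cmn.c.
 */
static inline void bnx2x_example_set_rx_bd_addr(struct eth_rx_bd *bd,
						dma_addr_t mapping)
{
	bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	bd->addr_lo = cpu_to_le32(U64_LO(mapping));
}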
137
138#define REG_ADDR(bp, offset) (bp->regview + offset)
139
140#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
141#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
142
143#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
144#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
145#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset))
146
147#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset)
148#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val)
149
150#define REG_RD_DMAE(bp, offset, valp, len32) \
151 do { \
152 bnx2x_read_dmae(bp, offset, len32);\
153 memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
154 } while (0)
155
156#define REG_WR_DMAE(bp, offset, valp, len32) \
157 do { \
158 memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
159 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
160 offset, len32); \
161 } while (0)
162
163#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
164 do { \
165 memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
166 bnx2x_write_big_buf_wb(bp, addr, len32); \
167 } while (0)
168
169#define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \
170 offsetof(struct shmem_region, field))
171#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
172#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
173
174#define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \
175 offsetof(struct shmem2_region, field))
176#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
177#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
178
179#define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field)
180#define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val)
181
182#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
183#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
184
185#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
186 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
187
188
189/* fast path */
190
191struct sw_rx_bd {
192 struct sk_buff *skb;
193 DEFINE_DMA_UNMAP_ADDR(mapping);
194};
195
196struct sw_tx_bd {
197 struct sk_buff *skb;
198 u16 first_bd;
199 u8 flags;
200/* Set on the first BD descriptor when there is a split BD */
201#define BNX2X_TSO_SPLIT_BD (1<<0)
202};
203
204struct sw_rx_page {
205 struct page *page;
206 DEFINE_DMA_UNMAP_ADDR(mapping);
207};
208
209union db_prod {
210 struct doorbell_set_prod data;
211 u32 raw;
212};
213
214
215/* MC hsi */
216#define BCM_PAGE_SHIFT 12
217#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT)
218#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1))
219#define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
220
221#define PAGES_PER_SGE_SHIFT 0
222#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
223#define SGE_PAGE_SIZE PAGE_SIZE
224#define SGE_PAGE_SHIFT PAGE_SHIFT
225#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
226
227/* SGE ring related macros */
228#define NUM_RX_SGE_PAGES 2
229#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
230#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
231/* RX_SGE_CNT is promised to be a power of 2 */
232#define RX_SGE_MASK (RX_SGE_CNT - 1)
233#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
234#define MAX_RX_SGE (NUM_RX_SGE - 1)
235#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
236 (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
237#define RX_SGE(x) ((x) & MAX_RX_SGE)
238
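/*
 * Worked example (assuming sizeof(struct eth_rx_sge) == 8; the structure
 * itself lives in bnx2x_hsi.h, not here): RX_SGE_CNT = 4096 / 8 = 512, so
 * MAX_RX_SGE_CNT is 510 and NEXT_SGE_IDX() jumps by 3 when the in-page
 * offset reaches 509, skipping the last two entries of every page; with
 * two pages NUM_RX_SGE is 1024 and RX_SGE() wraps indices at 1023.
 */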
239/* SGE producer mask related macros */
240/* Number of bits in one sge_mask array element */
241#define RX_SGE_MASK_ELEM_SZ 64
242#define RX_SGE_MASK_ELEM_SHIFT 6
243#define RX_SGE_MASK_ELEM_MASK ((u64)RX_SGE_MASK_ELEM_SZ - 1)
244
245/* Creates a bitmask of all ones in less significant bits.
246 idx - index of the most significant bit in the created mask */
247#define RX_SGE_ONES_MASK(idx) \
248 (((u64)0x1 << (((idx) & RX_SGE_MASK_ELEM_MASK) + 1)) - 1)
249#define RX_SGE_MASK_ELEM_ONE_MASK ((u64)(~0))
250
251/* Number of u64 elements in SGE mask array */
252#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
253 RX_SGE_MASK_ELEM_SZ)
254#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
255#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
256
257
258struct bnx2x_fastpath {
259
260 struct napi_struct napi;
261 struct host_status_block *status_blk;
262 dma_addr_t status_blk_mapping;
263
264 struct sw_tx_bd *tx_buf_ring;
265
266 union eth_tx_bd_types *tx_desc_ring;
267 dma_addr_t tx_desc_mapping;
268
269 struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
270 struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
271
272 struct eth_rx_bd *rx_desc_ring;
273 dma_addr_t rx_desc_mapping;
274
275 union eth_rx_cqe *rx_comp_ring;
276 dma_addr_t rx_comp_mapping;
277
278 /* SGE ring */
279 struct eth_rx_sge *rx_sge_ring;
280 dma_addr_t rx_sge_mapping;
281
282 u64 sge_mask[RX_SGE_MASK_LEN];
283
284 int state;
285#define BNX2X_FP_STATE_CLOSED 0
286#define BNX2X_FP_STATE_IRQ 0x80000
287#define BNX2X_FP_STATE_OPENING 0x90000
288#define BNX2X_FP_STATE_OPEN 0xa0000
289#define BNX2X_FP_STATE_HALTING 0xb0000
290#define BNX2X_FP_STATE_HALTED 0xc0000
291
292 u8 index; /* number in fp array */
293 u8 cl_id; /* eth client id */
294 u8 sb_id; /* status block number in HW */
295
296 union db_prod tx_db;
297
298 u16 tx_pkt_prod;
299 u16 tx_pkt_cons;
300 u16 tx_bd_prod;
301 u16 tx_bd_cons;
302 __le16 *tx_cons_sb;
303
304 __le16 fp_c_idx;
305 __le16 fp_u_idx;
306
307 u16 rx_bd_prod;
308 u16 rx_bd_cons;
309 u16 rx_comp_prod;
310 u16 rx_comp_cons;
311 u16 rx_sge_prod;
312 /* The last maximal completed SGE */
313 u16 last_max_sge;
314 __le16 *rx_cons_sb;
315 __le16 *rx_bd_cons_sb;
316
317
318 unsigned long tx_pkt,
319 rx_pkt,
320 rx_calls;
321
322 /* TPA related */
323 struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
324 u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
325#define BNX2X_TPA_START 1
326#define BNX2X_TPA_STOP 2
327 u8 disable_tpa;
328#ifdef BNX2X_STOP_ON_ERROR
329 u64 tpa_queue_used;
330#endif
331
332 struct tstorm_per_client_stats old_tclient;
333 struct ustorm_per_client_stats old_uclient;
334 struct xstorm_per_client_stats old_xclient;
335 struct bnx2x_eth_q_stats eth_q_stats;
336
337 /* The size is calculated using the following:
338 sizeof name field from netdev structure +
339 4 ('-Xx-' string) +
340 4 (for the digits and to make it DWORD aligned) */
341#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
342 char name[FP_NAME_SIZE];
343 struct bnx2x *bp; /* parent */
344};
345
346#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
347
348
349/* MC hsi */
350#define MAX_FETCH_BD 13 /* HW max BDs per packet */
351#define RX_COPY_THRESH 92
352
353#define NUM_TX_RINGS 16
354#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
355#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
356#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
357#define MAX_TX_BD (NUM_TX_BD - 1)
358#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
359#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
360 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
361#define TX_BD(x) ((x) & MAX_TX_BD)
362#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
363
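/*
 * Worked example (assuming sizeof(union eth_tx_bd_types) == 16, which is
 * not defined in this file): TX_DESC_CNT = 4096 / 16 = 256, so
 * MAX_TX_DESC_CNT is 255 and NEXT_TX_IDX() advances by 2 when the in-page
 * offset reaches 254, i.e. the last descriptor of every page is never
 * handed out as a regular BD; MAX_TX_AVAIL is then 255 * 16 - 2 = 4078.
 */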
364/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
365#define NUM_RX_RINGS 8
366#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
367#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2)
368#define RX_DESC_MASK (RX_DESC_CNT - 1)
369#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
370#define MAX_RX_BD (NUM_RX_BD - 1)
371#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
372#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
373 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
374#define RX_BD(x) ((x) & MAX_RX_BD)
375
376/* As long as CQE is 4 times bigger than BD entry we have to allocate
377 4 times more pages for CQ ring in order to keep it balanced with
378 BD ring */
379#define NUM_RCQ_RINGS (NUM_RX_RINGS * 4)
380#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
381#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)
382#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
383#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
384#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
385#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
386 (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
387#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
388
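/*
 * Worked sizing example: per the comments above, an RX BD is 8 bytes and a
 * CQE is four times bigger (32 bytes), so one 4K page holds 512 BDs but
 * only 128 CQEs.  Allocating NUM_RCQ_RINGS = NUM_RX_RINGS * 4 = 32 CQ pages
 * therefore gives NUM_RCQ_BD = 128 * 32 = 4096 completions, matching
 * NUM_RX_BD = 512 * 8 = 4096 buffer descriptors.
 */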
389
390/* This is needed for determining of last_max */
391#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
392
393#define __SGE_MASK_SET_BIT(el, bit) \
394 do { \
395 el = ((el) | ((u64)0x1 << (bit))); \
396 } while (0)
397
398#define __SGE_MASK_CLEAR_BIT(el, bit) \
399 do { \
400 el = ((el) & (~((u64)0x1 << (bit)))); \
401 } while (0)
402
403#define SGE_MASK_SET_BIT(fp, idx) \
404 __SGE_MASK_SET_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
405 ((idx) & RX_SGE_MASK_ELEM_MASK))
406
407#define SGE_MASK_CLEAR_BIT(fp, idx) \
408 __SGE_MASK_CLEAR_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
409 ((idx) & RX_SGE_MASK_ELEM_MASK))
410
411
412/* used on a CID received from the HW */
413#define SW_CID(x) (le32_to_cpu(x) & \
414 (COMMON_RAMROD_ETH_RX_CQE_CID >> 7))
415#define CQE_CMD(x) (le32_to_cpu(x) >> \
416 COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
417
418#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \
419 le32_to_cpu((bd)->addr_lo))
420#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
421
422
423#define DPM_TRIGER_TYPE 0x40
424#define DOORBELL(bp, cid, val) \
425 do { \
426 writel((u32)(val), bp->doorbells + (BCM_PAGE_SIZE * (cid)) + \
427 DPM_TRIGER_TYPE); \
428 } while (0)
429
430
431/* TX CSUM helpers */
432#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \
433 skb->csum_offset)
434#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
435 skb->csum_offset))
436
437#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
438
439#define XMIT_PLAIN 0
440#define XMIT_CSUM_V4 0x1
441#define XMIT_CSUM_V6 0x2
442#define XMIT_CSUM_TCP 0x4
443#define XMIT_GSO_V4 0x8
444#define XMIT_GSO_V6 0x10
445
446#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6)
447#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6)
448
449
450/* stuff added to make the code fit 80Col */
451
452#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
453
454#define TPA_TYPE_START ETH_FAST_PATH_RX_CQE_START_FLG
455#define TPA_TYPE_END ETH_FAST_PATH_RX_CQE_END_FLG
456#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \
457 (TPA_TYPE_START | TPA_TYPE_END))
458
459#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
460
461#define BNX2X_IP_CSUM_ERR(cqe) \
462 (!((cqe)->fast_path_cqe.status_flags & \
463 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
464 ((cqe)->fast_path_cqe.type_error_flags & \
465 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
466
467#define BNX2X_L4_CSUM_ERR(cqe) \
468 (!((cqe)->fast_path_cqe.status_flags & \
469 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
470 ((cqe)->fast_path_cqe.type_error_flags & \
471 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
472
473#define BNX2X_RX_CSUM_OK(cqe) \
474 (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
475
476#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
477 (((le16_to_cpu(flags) & \
478 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
479 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \
480 == PRS_FLAG_OVERETH_IPV4)
481#define BNX2X_RX_SUM_FIX(cqe) \
482 BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
483
484
485#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES)
486#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES)
487
488#define U_SB_ETH_RX_CQ_INDEX HC_INDEX_U_ETH_RX_CQ_CONS
489#define U_SB_ETH_RX_BD_INDEX HC_INDEX_U_ETH_RX_BD_CONS
490#define C_SB_ETH_TX_CQ_INDEX HC_INDEX_C_ETH_TX_CQ_CONS
491
492#define BNX2X_RX_SB_INDEX \
493 (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX])
494
495#define BNX2X_RX_SB_BD_INDEX \
496 (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_BD_INDEX])
497
498#define BNX2X_RX_SB_INDEX_NUM \
499 (((U_SB_ETH_RX_CQ_INDEX << \
500 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
501 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER) | \
502 ((U_SB_ETH_RX_BD_INDEX << \
503 USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT) & \
504 USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER))
505
506#define BNX2X_TX_SB_INDEX \
507 (&fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX])
508
509
510/* end of fast path */
511
512/* common */
513
514struct bnx2x_common {
515
516 u32 chip_id;
517/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
518#define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0)
519
520#define CHIP_NUM(bp) (bp->common.chip_id >> 16)
521#define CHIP_NUM_57710 0x164e
522#define CHIP_NUM_57711 0x164f
523#define CHIP_NUM_57711E 0x1650
524#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
525#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
526#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
527#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
528 CHIP_IS_57711E(bp))
529#define IS_E1H_OFFSET CHIP_IS_E1H(bp)
530
531#define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000)
532#define CHIP_REV_Ax 0x00000000
533/* assume maximum 5 revisions */
534#define CHIP_REV_IS_SLOW(bp) (CHIP_REV(bp) > 0x00005000)
535/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
536#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \
537 !(CHIP_REV(bp) & 0x00001000))
538/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
539#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \
540 (CHIP_REV(bp) & 0x00001000))
541
542#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
543 ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
544
545#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
546#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
547
548 int flash_size;
549#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
550#define NVRAM_TIMEOUT_COUNT 30000
551#define NVRAM_PAGE_SIZE 256
552
553 u32 shmem_base;
554 u32 shmem2_base;
555
556 u32 hw_config;
557
558 u32 bc_ver;
559};
560
561
562/* end of common */
563
564/* port */
565
566struct bnx2x_port {
567 u32 pmf;
568
569 u32 link_config;
570
571 u32 supported;
572/* link settings - missing defines */
573#define SUPPORTED_2500baseX_Full (1 << 15)
574
575 u32 advertising;
576/* link settings - missing defines */
577#define ADVERTISED_2500baseX_Full (1 << 15)
578
579 u32 phy_addr;
580
581 /* used to synchronize phy accesses */
582 struct mutex phy_mutex;
583 int need_hw_lock;
584
585 u32 port_stx;
586
587 struct nig_stats old_nig_stats;
588};
589
590/* end of port */
591
592
593
594#ifdef BCM_CNIC
595#define MAX_CONTEXT 15
596#else
597#define MAX_CONTEXT 16
598#endif
599
600union cdu_context {
601 struct eth_context eth;
602 char pad[1024];
603};
604
605#define MAX_DMAE_C 8
606
607/* DMA memory not used in fastpath */
608struct bnx2x_slowpath {
609 union cdu_context context[MAX_CONTEXT];
610 struct eth_stats_query fw_stats;
611 struct mac_configuration_cmd mac_config;
612 struct mac_configuration_cmd mcast_config;
613
614 /* used by dmae command executer */
615 struct dmae_command dmae[MAX_DMAE_C];
616
617 u32 stats_comp;
618 union mac_stats mac_stats;
619 struct nig_stats nig_stats;
620 struct host_port_stats port_stats;
621 struct host_func_stats func_stats;
622 struct host_func_stats func_stats_base;
623
624 u32 wb_comp;
625 u32 wb_data[4];
626};
627
628#define bnx2x_sp(bp, var) (&bp->slowpath->var)
629#define bnx2x_sp_mapping(bp, var) \
630 (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
631
632
633/* attn group wiring */
634#define MAX_DYNAMIC_ATTN_GRPS 8
635
636struct attn_route {
637 u32 sig[4];
638};
639
640typedef enum {
641 BNX2X_RECOVERY_DONE,
642 BNX2X_RECOVERY_INIT,
643 BNX2X_RECOVERY_WAIT,
644} bnx2x_recovery_state_t;
645
646struct bnx2x {
647 /* Fields used in the tx and intr/napi performance paths
648 * are grouped together in the beginning of the structure
649 */
650 struct bnx2x_fastpath fp[MAX_CONTEXT];
651 void __iomem *regview;
652 void __iomem *doorbells;
653#ifdef BCM_CNIC
654#define BNX2X_DB_SIZE (18*BCM_PAGE_SIZE)
655#else
656#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE)
657#endif
658
659 struct net_device *dev;
660 struct pci_dev *pdev;
661
662 atomic_t intr_sem;
663
664 bnx2x_recovery_state_t recovery_state;
665 int is_leader;
666#ifdef BCM_CNIC
667 struct msix_entry msix_table[MAX_CONTEXT+2];
668#else
669 struct msix_entry msix_table[MAX_CONTEXT+1];
670#endif
671#define INT_MODE_INTx 1
672#define INT_MODE_MSI 2
673
674 int tx_ring_size;
675
676#ifdef BCM_VLAN
677 struct vlan_group *vlgrp;
678#endif
679
680 u32 rx_csum;
681 u32 rx_buf_size;
682#define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */
683#define ETH_MIN_PACKET_SIZE 60
684#define ETH_MAX_PACKET_SIZE 1500
685#define ETH_MAX_JUMBO_PACKET_SIZE 9600
686
687 /* Max supported alignment is 256 (8 shift) */
688#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
689 L1_CACHE_SHIFT : 8)
690#define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT)
691
692 struct host_def_status_block *def_status_blk;
693#define DEF_SB_ID 16
694 __le16 def_c_idx;
695 __le16 def_u_idx;
696 __le16 def_x_idx;
697 __le16 def_t_idx;
698 __le16 def_att_idx;
699 u32 attn_state;
700 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
701
702 /* slow path ring */
703 struct eth_spe *spq;
704 dma_addr_t spq_mapping;
705 u16 spq_prod_idx;
706 struct eth_spe *spq_prod_bd;
707 struct eth_spe *spq_last_bd;
708 __le16 *dsb_sp_prod;
709 u16 spq_left; /* serialize spq */
710 /* used to synchronize spq accesses */
711 spinlock_t spq_lock;
712
713 /* Flags for marking that there is a STAT_QUERY or
714 SET_MAC ramrod pending */
715 int stats_pending;
716 int set_mac_pending;
717
718 /* End of fields used in the performance code paths */
719
720 int panic;
721 int msg_enable;
722
723 u32 flags;
724#define PCIX_FLAG 1
725#define PCI_32BIT_FLAG 2
726#define ONE_PORT_FLAG 4
727#define NO_WOL_FLAG 8
728#define USING_DAC_FLAG 0x10
729#define USING_MSIX_FLAG 0x20
730#define USING_MSI_FLAG 0x40
731#define TPA_ENABLE_FLAG 0x80
732#define NO_MCP_FLAG 0x100
733#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
734#define HW_VLAN_TX_FLAG 0x400
735#define HW_VLAN_RX_FLAG 0x800
736#define MF_FUNC_DIS 0x1000
737
738 int func;
739#define BP_PORT(bp) (bp->func % PORT_MAX)
740#define BP_FUNC(bp) (bp->func)
741#define BP_E1HVN(bp) (bp->func >> 1)
742#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
743
744#ifdef BCM_CNIC
745#define BCM_CNIC_CID_START 16
746#define BCM_ISCSI_ETH_CL_ID 17
747#endif
748
749 int pm_cap;
750 int pcie_cap;
751 int mrrs;
752
753 struct delayed_work sp_task;
754 struct delayed_work reset_task;
755 struct timer_list timer;
756 int current_interval;
757
758 u16 fw_seq;
759 u16 fw_drv_pulse_wr_seq;
760 u32 func_stx;
761
762 struct link_params link_params;
763 struct link_vars link_vars;
764 struct mdio_if_info mdio;
765
766 struct bnx2x_common common;
767 struct bnx2x_port port;
768
769 struct cmng_struct_per_port cmng;
770 u32 vn_weight_sum;
771
772 u32 mf_config;
773 u16 e1hov;
774 u8 e1hmf;
775#define IS_E1HMF(bp) (bp->e1hmf != 0)
776
777 u8 wol;
778
779 int rx_ring_size;
780
781 u16 tx_quick_cons_trip_int;
782 u16 tx_quick_cons_trip;
783 u16 tx_ticks_int;
784 u16 tx_ticks;
785
786 u16 rx_quick_cons_trip_int;
787 u16 rx_quick_cons_trip;
788 u16 rx_ticks_int;
789 u16 rx_ticks;
790/* Maximal coalescing timeout in us */
791#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
792
793 u32 lin_cnt;
794
795 int state;
796#define BNX2X_STATE_CLOSED 0
797#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000
798#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000
799#define BNX2X_STATE_OPEN 0x3000
800#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
801#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
802#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
803#define BNX2X_STATE_DIAG 0xe000
804#define BNX2X_STATE_ERROR 0xf000
805
806 int multi_mode;
807 int num_queues;
808 int disable_tpa;
809 int int_mode;
810
811 u32 rx_mode;
812#define BNX2X_RX_MODE_NONE 0
813#define BNX2X_RX_MODE_NORMAL 1
814#define BNX2X_RX_MODE_ALLMULTI 2
815#define BNX2X_RX_MODE_PROMISC 3
816#define BNX2X_MAX_MULTICAST 64
817#define BNX2X_MAX_EMUL_MULTI 16
818
819 u32 rx_mode_cl_mask;
820
821 dma_addr_t def_status_blk_mapping;
822
823 struct bnx2x_slowpath *slowpath;
824 dma_addr_t slowpath_mapping;
825
826 int dropless_fc;
827
828#ifdef BCM_CNIC
829 u32 cnic_flags;
830#define BNX2X_CNIC_FLAG_MAC_SET 1
831
832 void *t1;
833 dma_addr_t t1_mapping;
834 void *t2;
835 dma_addr_t t2_mapping;
836 void *timers;
837 dma_addr_t timers_mapping;
838 void *qm;
839 dma_addr_t qm_mapping;
840 struct cnic_ops *cnic_ops;
841 void *cnic_data;
842 u32 cnic_tag;
843 struct cnic_eth_dev cnic_eth_dev;
844 struct host_status_block *cnic_sb;
845 dma_addr_t cnic_sb_mapping;
846#define CNIC_SB_ID(bp) BP_L_ID(bp)
847 struct eth_spe *cnic_kwq;
848 struct eth_spe *cnic_kwq_prod;
849 struct eth_spe *cnic_kwq_cons;
850 struct eth_spe *cnic_kwq_last;
851 u16 cnic_kwq_pending;
852 u16 cnic_spq_pending;
853 struct mutex cnic_mutex;
854 u8 iscsi_mac[6];
855#endif
856
857 int dmae_ready;
858 /* used to synchronize dmae accesses */
859 struct mutex dmae_mutex;
860
861 /* used to protect the FW mail box */
862 struct mutex fw_mb_mutex;
863
864 /* used to synchronize stats collecting */
865 int stats_state;
866
867 /* used for synchronization of concurrent threads statistics handling */
868 spinlock_t stats_lock;
869
870 /* used by dmae command loader */
871 struct dmae_command stats_dmae;
872 int executer_idx;
873
874 u16 stats_counter;
875 struct bnx2x_eth_stats eth_stats;
876
877 struct z_stream_s *strm;
878 void *gunzip_buf;
879 dma_addr_t gunzip_mapping;
880 int gunzip_outlen;
881#define FW_BUF_SIZE 0x8000
882#define GUNZIP_BUF(bp) (bp->gunzip_buf)
883#define GUNZIP_PHYS(bp) (bp->gunzip_mapping)
884#define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen)
885
886 struct raw_op *init_ops;
887 /* Init blocks offsets inside init_ops */
888 u16 *init_ops_offsets;
889 /* Data blob - has 32 bit granularity */
890 u32 *init_data;
891 /* Zipped PRAM blobs - raw data */
892 const u8 *tsem_int_table_data;
893 const u8 *tsem_pram_data;
894 const u8 *usem_int_table_data;
895 const u8 *usem_pram_data;
896 const u8 *xsem_int_table_data;
897 const u8 *xsem_pram_data;
898 const u8 *csem_int_table_data;
899 const u8 *csem_pram_data;
900#define INIT_OPS(bp) (bp->init_ops)
901#define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets)
902#define INIT_DATA(bp) (bp->init_data)
903#define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data)
904#define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data)
905#define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data)
906#define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data)
907#define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data)
908#define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data)
909#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
910#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
911
912 char fw_ver[32];
913 const struct firmware *firmware;
914};
915
916
917#define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
918 : MAX_CONTEXT)
919#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
920#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
921
922#define for_each_queue(bp, var) \
923 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
924#define for_each_nondefault_queue(bp, var) \
925 for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
926
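/*
 * Illustrative helper (not a driver function): iterating the active queues
 * with the macros above.
 */
static inline int bnx2x_example_count_open_queues(struct bnx2x *bp)
{
	int i, n = 0;

	for_each_queue(bp, i)
		if (bp->fp[i].state == BNX2X_FP_STATE_OPEN)
			n++;

	return n;
}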
927
928void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
929void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
930 u32 len32);
931int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
932int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
933int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
934u32 bnx2x_fw_command(struct bnx2x *bp, u32 command);
935void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
936void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
937 u32 addr, u32 len);
938void bnx2x_calc_fc_adv(struct bnx2x *bp);
939int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
940 u32 data_hi, u32 data_lo, int common);
941void bnx2x_update_coalesce(struct bnx2x *bp);
942
943static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
944 int wait)
945{
946 u32 val;
947
948 do {
949 val = REG_RD(bp, reg);
950 if (val == expected)
951 break;
952 ms -= wait;
953 msleep(wait);
954
955 } while (ms > 0);
956
957 return val;
958}
959
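/*
 * Illustrative caller (not a driver function): wait up to 200 ms, in 10 ms
 * steps, for 'reg' to read back as 1.  reg_poll() returns the last value
 * read, so the caller compares it again to detect a timeout.
 */
static inline bool bnx2x_example_poll_for_one(struct bnx2x *bp, u32 reg)
{
	return reg_poll(bp, reg, 1, 200, 10) == 1;
}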
960
961/* load/unload mode */
962#define LOAD_NORMAL 0
963#define LOAD_OPEN 1
964#define LOAD_DIAG 2
965#define UNLOAD_NORMAL 0
966#define UNLOAD_CLOSE 1
967#define UNLOAD_RECOVERY 2
968
969
970/* DMAE command defines */
971#define DMAE_CMD_SRC_PCI 0
972#define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC
973
974#define DMAE_CMD_DST_PCI (1 << DMAE_COMMAND_DST_SHIFT)
975#define DMAE_CMD_DST_GRC (2 << DMAE_COMMAND_DST_SHIFT)
976
977#define DMAE_CMD_C_DST_PCI 0
978#define DMAE_CMD_C_DST_GRC (1 << DMAE_COMMAND_C_DST_SHIFT)
979
980#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
981
982#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
983#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
984#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
985#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
986
987#define DMAE_CMD_PORT_0 0
988#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT
989
990#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET
991#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
992#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
993
994#define DMAE_LEN32_RD_MAX 0x80
995#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
996
997#define DMAE_COMP_VAL 0xe0d0d0ae
998
999#define MAX_DMAE_C_PER_PORT 8
1000#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1001 BP_E1HVN(bp))
1002#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1003 E1HVN_MAX)
1004
1005
1006/* PCIE link and speed */
1007#define PCICFG_LINK_WIDTH 0x1f00000
1008#define PCICFG_LINK_WIDTH_SHIFT 20
1009#define PCICFG_LINK_SPEED 0xf0000
1010#define PCICFG_LINK_SPEED_SHIFT 16
1011
1012
1013#define BNX2X_NUM_TESTS 7
1014
1015#define BNX2X_PHY_LOOPBACK 0
1016#define BNX2X_MAC_LOOPBACK 1
1017#define BNX2X_PHY_LOOPBACK_FAILED 1
1018#define BNX2X_MAC_LOOPBACK_FAILED 2
1019#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
1020 BNX2X_PHY_LOOPBACK_FAILED)
1021
1022
1023#define STROM_ASSERT_ARRAY_SIZE 50
1024
1025
1026/* must be used on a CID before placing it on a HW ring */
1027#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
1028 (BP_E1HVN(bp) << 17) | (x))
1029
1030#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
1031#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
1032
1033
1034#define BNX2X_BTR 1
1035#define MAX_SPQ_PENDING 8
1036
1037
1038/* CMNG constants
1039 derived from lab experiments, and not from system spec calculations !!! */
1040#define DEF_MIN_RATE 100
1041/* resolution of the rate shaping timer - 100 usec */
1042#define RS_PERIODIC_TIMEOUT_USEC 100
1043/* resolution of fairness algorithm in usecs -
1044 coefficient for calculating the actual t fair */
1045#define T_FAIR_COEF 10000000
1046/* number of bytes in single QM arbitration cycle -
1047 coefficient for calculating the fairness timer */
1048#define QM_ARB_BYTES 40000
1049#define FAIR_MEM 2
1050
1051
1052#define ATTN_NIG_FOR_FUNC (1L << 8)
1053#define ATTN_SW_TIMER_4_FUNC (1L << 9)
1054#define GPIO_2_FUNC (1L << 10)
1055#define GPIO_3_FUNC (1L << 11)
1056#define GPIO_4_FUNC (1L << 12)
1057#define ATTN_GENERAL_ATTN_1 (1L << 13)
1058#define ATTN_GENERAL_ATTN_2 (1L << 14)
1059#define ATTN_GENERAL_ATTN_3 (1L << 15)
1060#define ATTN_GENERAL_ATTN_4 (1L << 13)
1061#define ATTN_GENERAL_ATTN_5 (1L << 14)
1062#define ATTN_GENERAL_ATTN_6 (1L << 15)
1063
1064#define ATTN_HARD_WIRED_MASK 0xff00
1065#define ATTENTION_ID 4
1066
1067
1068/* stuff added to make the code fit 80Col */
1069
1070#define BNX2X_PMF_LINK_ASSERT \
1071 GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))
1072
1073#define BNX2X_MC_ASSERT_BITS \
1074 (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
1075 GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
1076 GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
1077 GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
1078
1079#define BNX2X_MCP_ASSERT \
1080 GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
1081
1082#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
1083#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
1084 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
1085 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
1086 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
1087 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
1088 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
1089
1090#define HW_INTERRUT_ASSERT_SET_0 \
1091 (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
1092 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
1093 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
1094 AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT)
1095#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
1096 AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
1097 AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
1098 AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
1099 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR)
1100#define HW_INTERRUT_ASSERT_SET_1 \
1101 (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
1102 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
1103 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
1104 AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \
1105 AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \
1106 AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \
1107 AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \
1108 AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \
1109 AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
1110 AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
1111 AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
1112#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR |\
1113 AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
1114 AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
1115 AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
1116 AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
1117 AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
1118 AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
1119 AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
1120 AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
1121 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
1122 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR)
1123#define HW_INTERRUT_ASSERT_SET_2 \
1124 (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
1125 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
1126 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
1127 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
1128 AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
1129#define HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
1130 AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
1131 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
1132 AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
1133 AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
1134 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
1135 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
1136
1137#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
1138 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
1139 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
1140 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
1141
1142#define RSS_FLAGS(bp) \
1143 (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
1144 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
1145 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
1146 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
1147 (bp->multi_mode << \
1148 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
1149#define MULTI_MASK 0x7f
1150
1151
1152#define DEF_USB_FUNC_OFF (2 + 2*HC_USTORM_DEF_SB_NUM_INDICES)
1153#define DEF_CSB_FUNC_OFF (2 + 2*HC_CSTORM_DEF_SB_NUM_INDICES)
1154#define DEF_XSB_FUNC_OFF (2 + 2*HC_XSTORM_DEF_SB_NUM_INDICES)
1155#define DEF_TSB_FUNC_OFF (2 + 2*HC_TSTORM_DEF_SB_NUM_INDICES)
1156
1157#define C_DEF_SB_SP_INDEX HC_INDEX_DEF_C_ETH_SLOW_PATH
1158
1159#define BNX2X_SP_DSB_INDEX \
1160(&bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX])
1161
1162
1163#define CAM_IS_INVALID(x) \
1164(x.target_table_entry.flags == TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
1165
1166#define CAM_INVALIDATE(x) \
1167 (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
1168
1169
1170/* Number of u32 elements in MC hash array */
1171#define MC_HASH_SIZE 8
1172#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
1173 TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
1174
1175
1176#ifndef PXP2_REG_PXP2_INT_STS
1177#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
1178#endif
1179
1180#define BNX2X_VPD_LEN 128
1181#define VENDOR_ID_LEN 4
1182
1183#ifdef BNX2X_MAIN
1184#define BNX2X_EXTERN
1185#else
1186#define BNX2X_EXTERN extern
1187#endif
1188
1189BNX2X_EXTERN int load_count[3]; /* 0-common, 1-port0, 2-port1 */
1190
1191/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
1192
1193extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
1194
1195void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
1196
1197#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
new file mode 100644
index 00000000000..30d20c7fee0
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -0,0 +1,2251 @@
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18
19#include <linux/etherdevice.h>
20#include <linux/ip.h>
21#include <linux/ipv6.h>
22#include "bnx2x_cmn.h"
23
24#ifdef BCM_VLAN
25#include <linux/if_vlan.h>
26#endif
27
28static int bnx2x_poll(struct napi_struct *napi, int budget);
29
30/* free skb in the packet ring at pos idx
31 * return idx of last bd freed
32 */
33static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
34 u16 idx)
35{
36 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
37 struct eth_tx_start_bd *tx_start_bd;
38 struct eth_tx_bd *tx_data_bd;
39 struct sk_buff *skb = tx_buf->skb;
40 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
41 int nbd;
42
43 /* prefetch skb end pointer to speedup dev_kfree_skb() */
44 prefetch(&skb->end);
45
46 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
47 idx, tx_buf, skb);
48
49 /* unmap first bd */
50 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
51 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
52 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
53 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
54
55 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
56#ifdef BNX2X_STOP_ON_ERROR
57 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
58 BNX2X_ERR("BAD nbd!\n");
59 bnx2x_panic();
60 }
61#endif
62 new_cons = nbd + tx_buf->first_bd;
63
64 /* Get the next bd */
65 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
66
67 /* Skip a parse bd... */
68 --nbd;
69 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
70
71 /* ...and the TSO split header bd since they have no mapping */
72 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
73 --nbd;
74 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
75 }
76
77 /* now free frags */
78 while (nbd > 0) {
79
80 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
81 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
82 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
83 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
84 if (--nbd)
85 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
86 }
87
88 /* release skb */
89 WARN_ON(!skb);
90 dev_kfree_skb(skb);
91 tx_buf->first_bd = 0;
92 tx_buf->skb = NULL;
93
94 return new_cons;
95}
96
97int bnx2x_tx_int(struct bnx2x_fastpath *fp)
98{
99 struct bnx2x *bp = fp->bp;
100 struct netdev_queue *txq;
101 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
102
103#ifdef BNX2X_STOP_ON_ERROR
104 if (unlikely(bp->panic))
105 return -1;
106#endif
107
108 txq = netdev_get_tx_queue(bp->dev, fp->index);
109 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
110 sw_cons = fp->tx_pkt_cons;
111
112 while (sw_cons != hw_cons) {
113 u16 pkt_cons;
114
115 pkt_cons = TX_BD(sw_cons);
116
117 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
118
119 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
120 hw_cons, sw_cons, pkt_cons);
121
122/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
123 rmb();
124 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
125 }
126*/
127 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
128 sw_cons++;
129 }
130
131 fp->tx_pkt_cons = sw_cons;
132 fp->tx_bd_cons = bd_cons;
133
134 /* Need to make the tx_bd_cons update visible to start_xmit()
135 * before checking for netif_tx_queue_stopped(). Without the
136 * memory barrier, there is a small possibility that
137 * start_xmit() will miss it and cause the queue to be stopped
138 * forever.
139 */
140 smp_mb();
141
142 /* TBD need a thresh? */
143 if (unlikely(netif_tx_queue_stopped(txq))) {
144 /* Taking tx_lock() is needed to prevent reenabling the queue
145 * while it's empty. This could have happen if rx_action() gets
146 * suspended in bnx2x_tx_int() after the condition before
147 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
148 *
149 * stops the queue->sees fresh tx_bd_cons->releases the queue->
150 * sends some packets consuming the whole queue again->
151 * stops the queue
152 */
153
154 __netif_tx_lock(txq, smp_processor_id());
155
156 if ((netif_tx_queue_stopped(txq)) &&
157 (bp->state == BNX2X_STATE_OPEN) &&
158 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
159 netif_tx_wake_queue(txq);
160
161 __netif_tx_unlock(txq);
162 }
163 return 0;
164}
165
166static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
167 u16 idx)
168{
169 u16 last_max = fp->last_max_sge;
170
171 if (SUB_S16(idx, last_max) > 0)
172 fp->last_max_sge = idx;
173}
174
175static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
176 struct eth_fast_path_rx_cqe *fp_cqe)
177{
178 struct bnx2x *bp = fp->bp;
179 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
180 le16_to_cpu(fp_cqe->len_on_bd)) >>
181 SGE_PAGE_SHIFT;
182 u16 last_max, last_elem, first_elem;
183 u16 delta = 0;
184 u16 i;
185
186 if (!sge_len)
187 return;
188
189 /* First mark all used pages */
190 for (i = 0; i < sge_len; i++)
191 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
192
193 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
194 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
195
196 /* Here we assume that the last SGE index is the biggest */
197 prefetch((void *)(fp->sge_mask));
198 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
199
200 last_max = RX_SGE(fp->last_max_sge);
201 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
202 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
203
204 /* If ring is not full */
205 if (last_elem + 1 != first_elem)
206 last_elem++;
207
208 /* Now update the prod */
209 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
210 if (likely(fp->sge_mask[i]))
211 break;
212
213 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
214 delta += RX_SGE_MASK_ELEM_SZ;
215 }
216
217 if (delta > 0) {
218 fp->rx_sge_prod += delta;
219 /* clear page-end entries */
220 bnx2x_clear_sge_mask_next_elems(fp);
221 }
222
223 DP(NETIF_MSG_RX_STATUS,
224 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
225 fp->last_max_sge, fp->rx_sge_prod);
226}
227
228static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
229 struct sk_buff *skb, u16 cons, u16 prod)
230{
231 struct bnx2x *bp = fp->bp;
232 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
233 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
234 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
235 dma_addr_t mapping;
236
237 /* move empty skb from pool to prod and map it */
238 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
239 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
240 bp->rx_buf_size, DMA_FROM_DEVICE);
241 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
242
243 /* move partial skb from cons to pool (don't unmap yet) */
244 fp->tpa_pool[queue] = *cons_rx_buf;
245
246 /* mark bin state as start - print error if current state != stop */
247 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
248 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
249
250 fp->tpa_state[queue] = BNX2X_TPA_START;
251
252 /* point prod_bd to new skb */
253 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
254 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
255
256#ifdef BNX2X_STOP_ON_ERROR
257 fp->tpa_queue_used |= (1 << queue);
258#ifdef _ASM_GENERIC_INT_L64_H
259 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
260#else
261 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
262#endif
263 fp->tpa_queue_used);
264#endif
265}
266
267static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
268 struct sk_buff *skb,
269 struct eth_fast_path_rx_cqe *fp_cqe,
270 u16 cqe_idx)
271{
272 struct sw_rx_page *rx_pg, old_rx_pg;
273 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
274 u32 i, frag_len, frag_size, pages;
275 int err;
276 int j;
277
278 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
279 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
280
281 /* This is needed in order to enable forwarding support */
282 if (frag_size)
283 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
284 max(frag_size, (u32)len_on_bd));
285
286#ifdef BNX2X_STOP_ON_ERROR
287 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
288 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
289 pages, cqe_idx);
290 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
291 fp_cqe->pkt_len, len_on_bd);
292 bnx2x_panic();
293 return -EINVAL;
294 }
295#endif
296
297 /* Run through the SGL and compose the fragmented skb */
298 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
299 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
300
301 /* FW gives the indices of the SGE as if the ring is an array
302 (meaning that "next" element will consume 2 indices) */
303 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
304 rx_pg = &fp->rx_page_ring[sge_idx];
305 old_rx_pg = *rx_pg;
306
307 /* If we fail to allocate a substitute page, we simply stop
308 where we are and drop the whole packet */
309 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
310 if (unlikely(err)) {
311 fp->eth_q_stats.rx_skb_alloc_failed++;
312 return err;
313 }
314
315 /* Unmap the page as we r going to pass it to the stack */
316 dma_unmap_page(&bp->pdev->dev,
317 dma_unmap_addr(&old_rx_pg, mapping),
318 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
319
320 /* Add one frag and update the appropriate fields in the skb */
321 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
322
323 skb->data_len += frag_len;
324 skb->truesize += frag_len;
325 skb->len += frag_len;
326
327 frag_size -= frag_len;
328 }
329
330 return 0;
331}
332
333static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
334 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
335 u16 cqe_idx)
336{
337 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
338 struct sk_buff *skb = rx_buf->skb;
339 /* alloc new skb */
340 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
341
342 /* Unmap skb in the pool anyway, as we are going to change
343 pool entry status to BNX2X_TPA_STOP even if new skb allocation
344 fails. */
345 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
346 bp->rx_buf_size, DMA_FROM_DEVICE);
347
348 if (likely(new_skb)) {
349 /* fix ip xsum and give it to the stack */
350 /* (no need to map the new skb) */
351#ifdef BCM_VLAN
352 int is_vlan_cqe =
353 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
354 PARSING_FLAGS_VLAN);
355 int is_not_hwaccel_vlan_cqe =
356 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
357#endif
358
359 prefetch(skb);
360 prefetch(((char *)(skb)) + 128);
361
362#ifdef BNX2X_STOP_ON_ERROR
363 if (pad + len > bp->rx_buf_size) {
364 BNX2X_ERR("skb_put is about to fail... "
365 "pad %d len %d rx_buf_size %d\n",
366 pad, len, bp->rx_buf_size);
367 bnx2x_panic();
368 return;
369 }
370#endif
371
372 skb_reserve(skb, pad);
373 skb_put(skb, len);
374
375 skb->protocol = eth_type_trans(skb, bp->dev);
376 skb->ip_summed = CHECKSUM_UNNECESSARY;
377
378 {
379 struct iphdr *iph;
380
381 iph = (struct iphdr *)skb->data;
382#ifdef BCM_VLAN
383 /* If there is no Rx VLAN offloading -
384 take VLAN tag into an account */
385 if (unlikely(is_not_hwaccel_vlan_cqe))
386 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
387#endif
388 iph->check = 0;
389 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
390 }
391
392 if (!bnx2x_fill_frag_skb(bp, fp, skb,
393 &cqe->fast_path_cqe, cqe_idx)) {
394#ifdef BCM_VLAN
395 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
396 (!is_not_hwaccel_vlan_cqe))
397 vlan_gro_receive(&fp->napi, bp->vlgrp,
398 le16_to_cpu(cqe->fast_path_cqe.
399 vlan_tag), skb);
400 else
401#endif
402 napi_gro_receive(&fp->napi, skb);
403 } else {
404 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
405 " - dropping packet!\n");
406 dev_kfree_skb(skb);
407 }
408
409
410 /* put new skb in bin */
411 fp->tpa_pool[queue].skb = new_skb;
412
413 } else {
414 /* else drop the packet and keep the buffer in the bin */
415 DP(NETIF_MSG_RX_STATUS,
416 "Failed to allocate new skb - dropping packet!\n");
417 fp->eth_q_stats.rx_skb_alloc_failed++;
418 }
419
420 fp->tpa_state[queue] = BNX2X_TPA_STOP;
421}
422
423/* Set Toeplitz hash value in the skb using the value from the
424 * CQE (calculated by HW).
425 */
426static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
427 struct sk_buff *skb)
428{
429 /* Set Toeplitz hash from CQE */
430 if ((bp->dev->features & NETIF_F_RXHASH) &&
431 (cqe->fast_path_cqe.status_flags &
432 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
433 skb->rxhash =
434 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
435}
436
437int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
438{
439 struct bnx2x *bp = fp->bp;
440 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
441 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
442 int rx_pkt = 0;
443
444#ifdef BNX2X_STOP_ON_ERROR
445 if (unlikely(bp->panic))
446 return 0;
447#endif
448
449 /* CQ "next element" is of the size of the regular element,
450 that's why it's ok here */
451 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
452 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
453 hw_comp_cons++;
454
455 bd_cons = fp->rx_bd_cons;
456 bd_prod = fp->rx_bd_prod;
457 bd_prod_fw = bd_prod;
458 sw_comp_cons = fp->rx_comp_cons;
459 sw_comp_prod = fp->rx_comp_prod;
460
461 /* Memory barrier necessary as speculative reads of the rx
462 * buffer can be ahead of the index in the status block
463 */
464 rmb();
465
466 DP(NETIF_MSG_RX_STATUS,
467 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
468 fp->index, hw_comp_cons, sw_comp_cons);
469
470 while (sw_comp_cons != hw_comp_cons) {
471 struct sw_rx_bd *rx_buf = NULL;
472 struct sk_buff *skb;
473 union eth_rx_cqe *cqe;
474 u8 cqe_fp_flags;
475 u16 len, pad;
476
477 comp_ring_cons = RCQ_BD(sw_comp_cons);
478 bd_prod = RX_BD(bd_prod);
479 bd_cons = RX_BD(bd_cons);
480
481 /* Prefetch the page containing the BD descriptor
482 at producer's index. It will be needed when new skb is
483 allocated */
484 prefetch((void *)(PAGE_ALIGN((unsigned long)
485 (&fp->rx_desc_ring[bd_prod])) -
486 PAGE_SIZE + 1));
487
488 cqe = &fp->rx_comp_ring[comp_ring_cons];
489 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
490
491 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
492 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
493 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
494 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
495 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
496 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
497
498 /* is this a slowpath msg? */
499 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
500 bnx2x_sp_event(fp, cqe);
501 goto next_cqe;
502
503 /* this is an rx packet */
504 } else {
505 rx_buf = &fp->rx_buf_ring[bd_cons];
506 skb = rx_buf->skb;
507 prefetch(skb);
508 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
509 pad = cqe->fast_path_cqe.placement_offset;
510
511 /* If CQE is marked both TPA_START and TPA_END
512 it is a non-TPA CQE */
513 if ((!fp->disable_tpa) &&
514 (TPA_TYPE(cqe_fp_flags) !=
515 (TPA_TYPE_START | TPA_TYPE_END))) {
516 u16 queue = cqe->fast_path_cqe.queue_index;
517
518 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
519 DP(NETIF_MSG_RX_STATUS,
520 "calling tpa_start on queue %d\n",
521 queue);
522
523 bnx2x_tpa_start(fp, queue, skb,
524 bd_cons, bd_prod);
525
526 /* Set Toeplitz hash for an LRO skb */
527 bnx2x_set_skb_rxhash(bp, cqe, skb);
528
529 goto next_rx;
530 }
531
532 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
533 DP(NETIF_MSG_RX_STATUS,
534 "calling tpa_stop on queue %d\n",
535 queue);
536
537 if (!BNX2X_RX_SUM_FIX(cqe))
538 BNX2X_ERR("STOP on none TCP "
539 "data\n");
540
541 /* This is a size of the linear data
542 on this skb */
543 len = le16_to_cpu(cqe->fast_path_cqe.
544 len_on_bd);
545 bnx2x_tpa_stop(bp, fp, queue, pad,
546 len, cqe, comp_ring_cons);
547#ifdef BNX2X_STOP_ON_ERROR
548 if (bp->panic)
549 return 0;
550#endif
551
552 bnx2x_update_sge_prod(fp,
553 &cqe->fast_path_cqe);
554 goto next_cqe;
555 }
556 }
557
558 dma_sync_single_for_device(&bp->pdev->dev,
559 dma_unmap_addr(rx_buf, mapping),
560 pad + RX_COPY_THRESH,
561 DMA_FROM_DEVICE);
562 prefetch(((char *)(skb)) + 128);
563
564 /* is this an error packet? */
565 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
566 DP(NETIF_MSG_RX_ERR,
567 "ERROR flags %x rx packet %u\n",
568 cqe_fp_flags, sw_comp_cons);
569 fp->eth_q_stats.rx_err_discard_pkt++;
570 goto reuse_rx;
571 }
572
573 /* Since we don't have a jumbo ring
574 * copy small packets if mtu > 1500
575 */
576 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
577 (len <= RX_COPY_THRESH)) {
578 struct sk_buff *new_skb;
579
580 new_skb = netdev_alloc_skb(bp->dev,
581 len + pad);
582 if (new_skb == NULL) {
583 DP(NETIF_MSG_RX_ERR,
584 "ERROR packet dropped "
585 "because of alloc failure\n");
586 fp->eth_q_stats.rx_skb_alloc_failed++;
587 goto reuse_rx;
588 }
589
590 /* aligned copy */
591 skb_copy_from_linear_data_offset(skb, pad,
592 new_skb->data + pad, len);
593 skb_reserve(new_skb, pad);
594 skb_put(new_skb, len);
595
596 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
597
598 skb = new_skb;
599
600 } else
601 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
602 dma_unmap_single(&bp->pdev->dev,
603 dma_unmap_addr(rx_buf, mapping),
604 bp->rx_buf_size,
605 DMA_FROM_DEVICE);
606 skb_reserve(skb, pad);
607 skb_put(skb, len);
608
609 } else {
610 DP(NETIF_MSG_RX_ERR,
611 "ERROR packet dropped because "
612 "of alloc failure\n");
613 fp->eth_q_stats.rx_skb_alloc_failed++;
614reuse_rx:
615 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
616 goto next_rx;
617 }
618
619 skb->protocol = eth_type_trans(skb, bp->dev);
620
621 /* Set Toeplitz hash for a none-LRO skb */
622 bnx2x_set_skb_rxhash(bp, cqe, skb);
623
624 skb->ip_summed = CHECKSUM_NONE;
625 if (bp->rx_csum) {
626 if (likely(BNX2X_RX_CSUM_OK(cqe)))
627 skb->ip_summed = CHECKSUM_UNNECESSARY;
628 else
629 fp->eth_q_stats.hw_csum_err++;
630 }
631 }
632
633 skb_record_rx_queue(skb, fp->index);
634
635#ifdef BCM_VLAN
636 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
637 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
638 PARSING_FLAGS_VLAN))
639 vlan_gro_receive(&fp->napi, bp->vlgrp,
640 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
641 else
642#endif
643 napi_gro_receive(&fp->napi, skb);
644
645
646next_rx:
647 rx_buf->skb = NULL;
648
649 bd_cons = NEXT_RX_IDX(bd_cons);
650 bd_prod = NEXT_RX_IDX(bd_prod);
651 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
652 rx_pkt++;
653next_cqe:
654 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
655 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
656
657 if (rx_pkt == budget)
658 break;
659 } /* while */
660
661 fp->rx_bd_cons = bd_cons;
662 fp->rx_bd_prod = bd_prod_fw;
663 fp->rx_comp_cons = sw_comp_cons;
664 fp->rx_comp_prod = sw_comp_prod;
665
666 /* Update producers */
667 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
668 fp->rx_sge_prod);
669
670 fp->rx_pkt += rx_pkt;
671 fp->rx_calls++;
672
673 return rx_pkt;
674}
675
676static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
677{
678 struct bnx2x_fastpath *fp = fp_cookie;
679 struct bnx2x *bp = fp->bp;
680
681 /* Return here if interrupt is disabled */
682 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
683 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
684 return IRQ_HANDLED;
685 }
686
687 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
688 fp->index, fp->sb_id);
689 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
690
691#ifdef BNX2X_STOP_ON_ERROR
692 if (unlikely(bp->panic))
693 return IRQ_HANDLED;
694#endif
695
696 /* Handle Rx and Tx according to MSI-X vector */
697 prefetch(fp->rx_cons_sb);
698 prefetch(fp->tx_cons_sb);
699 prefetch(&fp->status_blk->u_status_block.status_block_index);
700 prefetch(&fp->status_blk->c_status_block.status_block_index);
701 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
702
703 return IRQ_HANDLED;
704}
705
706
707/* HW Lock for shared dual port PHYs */
708void bnx2x_acquire_phy_lock(struct bnx2x *bp)
709{
710 mutex_lock(&bp->port.phy_mutex);
711
712 if (bp->port.need_hw_lock)
713 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
714}
715
716void bnx2x_release_phy_lock(struct bnx2x *bp)
717{
718 if (bp->port.need_hw_lock)
719 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
720
721 mutex_unlock(&bp->port.phy_mutex);
722}
723
724void bnx2x_link_report(struct bnx2x *bp)
725{
726 if (bp->flags & MF_FUNC_DIS) {
727 netif_carrier_off(bp->dev);
728 netdev_err(bp->dev, "NIC Link is Down\n");
729 return;
730 }
731
732 if (bp->link_vars.link_up) {
733 u16 line_speed;
734
735 if (bp->state == BNX2X_STATE_OPEN)
736 netif_carrier_on(bp->dev);
737 netdev_info(bp->dev, "NIC Link is Up, ");
738
739 line_speed = bp->link_vars.line_speed;
740 if (IS_E1HMF(bp)) {
741 u16 vn_max_rate;
742
743 vn_max_rate =
744 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
745 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
746 if (vn_max_rate < line_speed)
747 line_speed = vn_max_rate;
748 }
749 pr_cont("%d Mbps ", line_speed);
750
751 if (bp->link_vars.duplex == DUPLEX_FULL)
752 pr_cont("full duplex");
753 else
754 pr_cont("half duplex");
755
756 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
757 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
758 pr_cont(", receive ");
759 if (bp->link_vars.flow_ctrl &
760 BNX2X_FLOW_CTRL_TX)
761 pr_cont("& transmit ");
762 } else {
763 pr_cont(", transmit ");
764 }
765 pr_cont("flow control ON");
766 }
767 pr_cont("\n");
768
769 } else { /* link_down */
770 netif_carrier_off(bp->dev);
771 netdev_err(bp->dev, "NIC Link is Down\n");
772 }
773}
774
775void bnx2x_init_rx_rings(struct bnx2x *bp)
776{
777 int func = BP_FUNC(bp);
778 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
779 ETH_MAX_AGGREGATION_QUEUES_E1H;
780 u16 ring_prod, cqe_ring_prod;
781 int i, j;
782
783 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
784 DP(NETIF_MSG_IFUP,
785 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
786
787 if (bp->flags & TPA_ENABLE_FLAG) {
788
789 for_each_queue(bp, j) {
790 struct bnx2x_fastpath *fp = &bp->fp[j];
791
792 for (i = 0; i < max_agg_queues; i++) {
793 fp->tpa_pool[i].skb =
794 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
795 if (!fp->tpa_pool[i].skb) {
796 BNX2X_ERR("Failed to allocate TPA "
797 "skb pool for queue[%d] - "
798 "disabling TPA on this "
799 "queue!\n", j);
800 bnx2x_free_tpa_pool(bp, fp, i);
801 fp->disable_tpa = 1;
802 break;
803 }
804 dma_unmap_addr_set((struct sw_rx_bd *)
805							&fp->tpa_pool[i],
806 mapping, 0);
807 fp->tpa_state[i] = BNX2X_TPA_STOP;
808 }
809 }
810 }
811
812 for_each_queue(bp, j) {
813 struct bnx2x_fastpath *fp = &bp->fp[j];
814
815 fp->rx_bd_cons = 0;
816 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
817 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
818
819 /* "next page" elements initialization */
820 /* SGE ring */
821 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
822 struct eth_rx_sge *sge;
823
824 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
825 sge->addr_hi =
826 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
827 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
828 sge->addr_lo =
829 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
830 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
831 }
832
833 bnx2x_init_sge_ring_bit_mask(fp);
834
835 /* RX BD ring */
836 for (i = 1; i <= NUM_RX_RINGS; i++) {
837 struct eth_rx_bd *rx_bd;
838
839 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
840 rx_bd->addr_hi =
841 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
842 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
843 rx_bd->addr_lo =
844 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
845 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
846 }
847
848 /* CQ ring */
849 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
850 struct eth_rx_cqe_next_page *nextpg;
851
852 nextpg = (struct eth_rx_cqe_next_page *)
853 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
854 nextpg->addr_hi =
855 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
856 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
857 nextpg->addr_lo =
858 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
859 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
860 }
861
862 /* Allocate SGEs and initialize the ring elements */
863 for (i = 0, ring_prod = 0;
864 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
865
866 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
867 BNX2X_ERR("was only able to allocate "
868 "%d rx sges\n", i);
869 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
870 /* Cleanup already allocated elements */
871 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
872 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
873 fp->disable_tpa = 1;
874 ring_prod = 0;
875 break;
876 }
877 ring_prod = NEXT_SGE_IDX(ring_prod);
878 }
879 fp->rx_sge_prod = ring_prod;
880
881 /* Allocate BDs and initialize BD ring */
882 fp->rx_comp_cons = 0;
883 cqe_ring_prod = ring_prod = 0;
884 for (i = 0; i < bp->rx_ring_size; i++) {
885 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
886 BNX2X_ERR("was only able to allocate "
887 "%d rx skbs on queue[%d]\n", i, j);
888 fp->eth_q_stats.rx_skb_alloc_failed++;
889 break;
890 }
891 ring_prod = NEXT_RX_IDX(ring_prod);
892 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
893 WARN_ON(ring_prod <= i);
894 }
895
896 fp->rx_bd_prod = ring_prod;
897 /* must not have more available CQEs than BDs */
898 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
899 cqe_ring_prod);
900 fp->rx_pkt = fp->rx_calls = 0;
901
902		/* Warning!
903		 * This will generate an interrupt (to the TSTORM);
904		 * it must only be done after the chip is initialized.
905		 */
906 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
907 fp->rx_sge_prod);
908 if (j != 0)
909 continue;
910
911 REG_WR(bp, BAR_USTRORM_INTMEM +
912 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
913 U64_LO(fp->rx_comp_mapping));
914 REG_WR(bp, BAR_USTRORM_INTMEM +
915 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
916 U64_HI(fp->rx_comp_mapping));
917 }
918}
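
/* Free the skb of every Tx packet still pending between the SW packet
 * consumer and producer on each queue (used on the unload/error paths).
 */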
919static void bnx2x_free_tx_skbs(struct bnx2x *bp)
920{
921 int i;
922
923 for_each_queue(bp, i) {
924 struct bnx2x_fastpath *fp = &bp->fp[i];
925
926 u16 bd_cons = fp->tx_bd_cons;
927 u16 sw_prod = fp->tx_pkt_prod;
928 u16 sw_cons = fp->tx_pkt_cons;
929
930 while (sw_cons != sw_prod) {
931 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
932 sw_cons++;
933 }
934 }
935}
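
/* Unmap and free every Rx skb on each queue, then release the per-queue
 * TPA pool (unless TPA was disabled for that queue).
 */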
936
937static void bnx2x_free_rx_skbs(struct bnx2x *bp)
938{
939 int i, j;
940
941 for_each_queue(bp, j) {
942 struct bnx2x_fastpath *fp = &bp->fp[j];
943
944 for (i = 0; i < NUM_RX_BD; i++) {
945 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
946 struct sk_buff *skb = rx_buf->skb;
947
948 if (skb == NULL)
949 continue;
950
951 dma_unmap_single(&bp->pdev->dev,
952 dma_unmap_addr(rx_buf, mapping),
953 bp->rx_buf_size, DMA_FROM_DEVICE);
954
955 rx_buf->skb = NULL;
956 dev_kfree_skb(skb);
957 }
958 if (!fp->disable_tpa)
959 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
960 ETH_MAX_AGGREGATION_QUEUES_E1 :
961 ETH_MAX_AGGREGATION_QUEUES_E1H);
962 }
963}
964
965void bnx2x_free_skbs(struct bnx2x *bp)
966{
967 bnx2x_free_tx_skbs(bp);
968 bnx2x_free_rx_skbs(bp);
969}
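
/* Release the slowpath IRQ (MSI-X vector 0) and every fastpath IRQ;
 * when CNIC support is compiled in, its vector is skipped via 'offset'.
 */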
970
971static void bnx2x_free_msix_irqs(struct bnx2x *bp)
972{
973 int i, offset = 1;
974
975 free_irq(bp->msix_table[0].vector, bp->dev);
976 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
977 bp->msix_table[0].vector);
978
979#ifdef BCM_CNIC
980 offset++;
981#endif
982 for_each_queue(bp, i) {
983 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
984 "state %x\n", i, bp->msix_table[i + offset].vector,
985 bnx2x_fp(bp, i, state));
986
987 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
988 }
989}
990
991void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
992{
993 if (bp->flags & USING_MSIX_FLAG) {
994 if (!disable_only)
995 bnx2x_free_msix_irqs(bp);
996 pci_disable_msix(bp->pdev);
997 bp->flags &= ~USING_MSIX_FLAG;
998
999 } else if (bp->flags & USING_MSI_FLAG) {
1000 if (!disable_only)
1001 free_irq(bp->pdev->irq, bp->dev);
1002 pci_disable_msi(bp->pdev);
1003 bp->flags &= ~USING_MSI_FLAG;
1004
1005 } else if (!disable_only)
1006 free_irq(bp->pdev->irq, bp->dev);
1007}
1008
1009static int bnx2x_enable_msix(struct bnx2x *bp)
1010{
1011 int i, rc, offset = 1;
1012 int igu_vec = 0;
1013
1014 bp->msix_table[0].entry = igu_vec;
1015 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
1016
1017#ifdef BCM_CNIC
1018 igu_vec = BP_L_ID(bp) + offset;
1019 bp->msix_table[1].entry = igu_vec;
1020 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
1021 offset++;
1022#endif
1023 for_each_queue(bp, i) {
1024 igu_vec = BP_L_ID(bp) + offset + i;
1025 bp->msix_table[i + offset].entry = igu_vec;
1026 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1027 "(fastpath #%u)\n", i + offset, igu_vec, i);
1028 }
1029
1030 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
1031 BNX2X_NUM_QUEUES(bp) + offset);
1032
1033 /*
1034 * reconfigure number of tx/rx queues according to available
1035 * MSI-X vectors
1036 */
1037 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1038 /* vectors available for FP */
1039 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
1040
1041 DP(NETIF_MSG_IFUP,
1042 "Trying to use less MSI-X vectors: %d\n", rc);
1043
1044 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1045
1046 if (rc) {
1047 DP(NETIF_MSG_IFUP,
1048 "MSI-X is not attainable rc %d\n", rc);
1049 return rc;
1050 }
1051
1052 bp->num_queues = min(bp->num_queues, fp_vec);
1053
1054 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1055 bp->num_queues);
1056 } else if (rc) {
1057 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1058 return rc;
1059 }
1060
1061 bp->flags |= USING_MSIX_FLAG;
1062
1063 return 0;
1064}
1065
1066static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1067{
1068 int i, rc, offset = 1;
1069
1070 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1071 bp->dev->name, bp->dev);
1072 if (rc) {
1073 BNX2X_ERR("request sp irq failed\n");
1074 return -EBUSY;
1075 }
1076
1077#ifdef BCM_CNIC
1078 offset++;
1079#endif
1080 for_each_queue(bp, i) {
1081 struct bnx2x_fastpath *fp = &bp->fp[i];
1082 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1083 bp->dev->name, i);
1084
1085 rc = request_irq(bp->msix_table[i + offset].vector,
1086 bnx2x_msix_fp_int, 0, fp->name, fp);
1087 if (rc) {
1088 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1089 bnx2x_free_msix_irqs(bp);
1090 return -EBUSY;
1091 }
1092
1093 fp->state = BNX2X_FP_STATE_IRQ;
1094 }
1095
1096 i = BNX2X_NUM_QUEUES(bp);
1097 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1098 " ... fp[%d] %d\n",
1099 bp->msix_table[0].vector,
1100 0, bp->msix_table[offset].vector,
1101 i - 1, bp->msix_table[offset + i - 1].vector);
1102
1103 return 0;
1104}
1105
1106static int bnx2x_enable_msi(struct bnx2x *bp)
1107{
1108 int rc;
1109
1110 rc = pci_enable_msi(bp->pdev);
1111 if (rc) {
1112 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1113 return -1;
1114 }
1115 bp->flags |= USING_MSI_FLAG;
1116
1117 return 0;
1118}
1119
1120static int bnx2x_req_irq(struct bnx2x *bp)
1121{
1122 unsigned long flags;
1123 int rc;
1124
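	/* MSI is exclusive to this device, while legacy INTx may be a shared
	 * line and must be requested with IRQF_SHARED */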
1125 if (bp->flags & USING_MSI_FLAG)
1126 flags = 0;
1127 else
1128 flags = IRQF_SHARED;
1129
1130 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1131 bp->dev->name, bp->dev);
1132 if (!rc)
1133 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1134
1135 return rc;
1136}
1137
1138static void bnx2x_napi_enable(struct bnx2x *bp)
1139{
1140 int i;
1141
1142 for_each_queue(bp, i)
1143 napi_enable(&bnx2x_fp(bp, i, napi));
1144}
1145
1146static void bnx2x_napi_disable(struct bnx2x *bp)
1147{
1148 int i;
1149
1150 for_each_queue(bp, i)
1151 napi_disable(&bnx2x_fp(bp, i, napi));
1152}
1153
1154void bnx2x_netif_start(struct bnx2x *bp)
1155{
1156 int intr_sem;
1157
1158 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1159 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1160
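	/* Only (re)enable the datapath when this call drops the last
	 * interrupt-disable reference, i.e. intr_sem has reached zero */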
1161 if (intr_sem) {
1162 if (netif_running(bp->dev)) {
1163 bnx2x_napi_enable(bp);
1164 bnx2x_int_enable(bp);
1165 if (bp->state == BNX2X_STATE_OPEN)
1166 netif_tx_wake_all_queues(bp->dev);
1167 }
1168 }
1169}
1170
1171void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1172{
1173 bnx2x_int_disable_sync(bp, disable_hw);
1174 bnx2x_napi_disable(bp);
1175 netif_tx_disable(bp->dev);
1176}
1177static int bnx2x_set_num_queues(struct bnx2x *bp)
1178{
1179 int rc = 0;
1180
1181 switch (bp->int_mode) {
1182 case INT_MODE_INTx:
1183 case INT_MODE_MSI:
1184 bp->num_queues = 1;
1185 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
1186 break;
1187 default:
1188 /* Set number of queues according to bp->multi_mode value */
1189 bnx2x_set_num_queues_msix(bp);
1190
1191 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
1192 bp->num_queues);
1193
1194 /* if we can't use MSI-X we only need one fp,
1195 * so try to enable MSI-X with the requested number of fp's
1196		 * and fall back to MSI or legacy INTx with one fp
1197 */
1198 rc = bnx2x_enable_msix(bp);
1199 if (rc)
1200 /* failed to enable MSI-X */
1201 bp->num_queues = 1;
1202 break;
1203 }
1204 bp->dev->real_num_tx_queues = bp->num_queues;
1205 return rc;
1206}
1207
1208/* must be called with rtnl_lock */
1209int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1210{
1211 u32 load_code;
1212 int i, rc;
1213
1214#ifdef BNX2X_STOP_ON_ERROR
1215 if (unlikely(bp->panic))
1216 return -EPERM;
1217#endif
1218
1219 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1220
1221 rc = bnx2x_set_num_queues(bp);
1222
1223 if (bnx2x_alloc_mem(bp)) {
1224 bnx2x_free_irq(bp, true);
1225 return -ENOMEM;
1226 }
1227
1228 for_each_queue(bp, i)
1229 bnx2x_fp(bp, i, disable_tpa) =
1230 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1231
1232 for_each_queue(bp, i)
1233 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
1234 bnx2x_poll, 128);
1235
1236 bnx2x_napi_enable(bp);
1237
1238 if (bp->flags & USING_MSIX_FLAG) {
1239 rc = bnx2x_req_msix_irqs(bp);
1240 if (rc) {
1241 bnx2x_free_irq(bp, true);
1242 goto load_error1;
1243 }
1244 } else {
1245		/* Fall back to INTx if MSI-X could not be enabled due to lack of
1246 memory (in bnx2x_set_num_queues()) */
1247 if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
1248 bnx2x_enable_msi(bp);
1249 bnx2x_ack_int(bp);
1250 rc = bnx2x_req_irq(bp);
1251 if (rc) {
1252 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1253 bnx2x_free_irq(bp, true);
1254 goto load_error1;
1255 }
1256 if (bp->flags & USING_MSI_FLAG) {
1257 bp->dev->irq = bp->pdev->irq;
1258 netdev_info(bp->dev, "using MSI IRQ %d\n",
1259 bp->pdev->irq);
1260 }
1261 }
1262
1263	/* Send LOAD_REQUEST command to MCP.
1264	   The MCP returns the type of LOAD command:
1265	   if this is the first port to be initialized,
1266	   the common blocks should be initialized as well; otherwise not.
1267	*/
1268 if (!BP_NOMCP(bp)) {
1269 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
1270 if (!load_code) {
1271 BNX2X_ERR("MCP response failure, aborting\n");
1272 rc = -EBUSY;
1273 goto load_error2;
1274 }
1275 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1276 rc = -EBUSY; /* other port in diagnostic mode */
1277 goto load_error2;
1278 }
1279
1280 } else {
1281 int port = BP_PORT(bp);
1282
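		/* Without an MCP, emulate LOAD_REQ arbitration with driver-side
		 * load counters: the first function overall initializes the
		 * COMMON blocks and the first function on a port initializes
		 * the PORT blocks.
		 */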
1283 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
1284 load_count[0], load_count[1], load_count[2]);
1285 load_count[0]++;
1286 load_count[1 + port]++;
1287 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
1288 load_count[0], load_count[1], load_count[2]);
1289 if (load_count[0] == 1)
1290 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1291 else if (load_count[1 + port] == 1)
1292 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1293 else
1294 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1295 }
1296
1297 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1298 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1299 bp->port.pmf = 1;
1300 else
1301 bp->port.pmf = 0;
1302 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1303
1304 /* Initialize HW */
1305 rc = bnx2x_init_hw(bp, load_code);
1306 if (rc) {
1307 BNX2X_ERR("HW init failed, aborting\n");
1308 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
1309 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
1310 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
1311 goto load_error2;
1312 }
1313
1314 /* Setup NIC internals and enable interrupts */
1315 bnx2x_nic_init(bp, load_code);
1316
1317 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
1318 (bp->common.shmem2_base))
1319 SHMEM2_WR(bp, dcc_support,
1320 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1321 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1322
1323 /* Send LOAD_DONE command to MCP */
1324 if (!BP_NOMCP(bp)) {
1325 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
1326 if (!load_code) {
1327 BNX2X_ERR("MCP response failure, aborting\n");
1328 rc = -EBUSY;
1329 goto load_error3;
1330 }
1331 }
1332
1333 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1334
1335 rc = bnx2x_setup_leading(bp);
1336 if (rc) {
1337 BNX2X_ERR("Setup leading failed!\n");
1338#ifndef BNX2X_STOP_ON_ERROR
1339 goto load_error3;
1340#else
1341 bp->panic = 1;
1342 return -EBUSY;
1343#endif
1344 }
1345
1346 if (CHIP_IS_E1H(bp))
1347 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1348 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1349 bp->flags |= MF_FUNC_DIS;
1350 }
1351
1352 if (bp->state == BNX2X_STATE_OPEN) {
1353#ifdef BCM_CNIC
1354 /* Enable Timer scan */
1355 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1356#endif
1357 for_each_nondefault_queue(bp, i) {
1358 rc = bnx2x_setup_multi(bp, i);
1359 if (rc)
1360#ifdef BCM_CNIC
1361 goto load_error4;
1362#else
1363 goto load_error3;
1364#endif
1365 }
1366
1367 if (CHIP_IS_E1(bp))
1368 bnx2x_set_eth_mac_addr_e1(bp, 1);
1369 else
1370 bnx2x_set_eth_mac_addr_e1h(bp, 1);
1371#ifdef BCM_CNIC
1372 /* Set iSCSI L2 MAC */
1373 mutex_lock(&bp->cnic_mutex);
1374 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
1375 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
1376 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
1377 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
1378 CNIC_SB_ID(bp));
1379 }
1380 mutex_unlock(&bp->cnic_mutex);
1381#endif
1382 }
1383
1384 if (bp->port.pmf)
1385 bnx2x_initial_phy_init(bp, load_mode);
1386
1387 /* Start fast path */
1388 switch (load_mode) {
1389 case LOAD_NORMAL:
1390 if (bp->state == BNX2X_STATE_OPEN) {
1391			/* Tx queues only need to be re-enabled */
1392 netif_tx_wake_all_queues(bp->dev);
1393 }
1394 /* Initialize the receive filter. */
1395 bnx2x_set_rx_mode(bp->dev);
1396 break;
1397
1398 case LOAD_OPEN:
1399 netif_tx_start_all_queues(bp->dev);
1400 if (bp->state != BNX2X_STATE_OPEN)
1401 netif_tx_disable(bp->dev);
1402 /* Initialize the receive filter. */
1403 bnx2x_set_rx_mode(bp->dev);
1404 break;
1405
1406 case LOAD_DIAG:
1407 /* Initialize the receive filter. */
1408 bnx2x_set_rx_mode(bp->dev);
1409 bp->state = BNX2X_STATE_DIAG;
1410 break;
1411
1412 default:
1413 break;
1414 }
1415
1416 if (!bp->port.pmf)
1417 bnx2x__link_status_update(bp);
1418
1419 /* start the timer */
1420 mod_timer(&bp->timer, jiffies + bp->current_interval);
1421
1422#ifdef BCM_CNIC
1423 bnx2x_setup_cnic_irq_info(bp);
1424 if (bp->state == BNX2X_STATE_OPEN)
1425 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1426#endif
1427 bnx2x_inc_load_cnt(bp);
1428
1429 return 0;
1430
1431#ifdef BCM_CNIC
1432load_error4:
1433 /* Disable Timer scan */
1434 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1435#endif
1436load_error3:
1437 bnx2x_int_disable_sync(bp, 1);
1438 if (!BP_NOMCP(bp)) {
1439 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
1440 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
1441 }
1442 bp->port.pmf = 0;
1443 /* Free SKBs, SGEs, TPA pool and driver internals */
1444 bnx2x_free_skbs(bp);
1445 for_each_queue(bp, i)
1446 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1447load_error2:
1448 /* Release IRQs */
1449 bnx2x_free_irq(bp, false);
1450load_error1:
1451 bnx2x_napi_disable(bp);
1452 for_each_queue(bp, i)
1453 netif_napi_del(&bnx2x_fp(bp, i, napi));
1454 bnx2x_free_mem(bp);
1455
1456 return rc;
1457}
1458
1459/* must be called with rtnl_lock */
1460int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1461{
1462 int i;
1463
1464 if (bp->state == BNX2X_STATE_CLOSED) {
1465 /* Interface has been removed - nothing to recover */
1466 bp->recovery_state = BNX2X_RECOVERY_DONE;
1467 bp->is_leader = 0;
1468 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1469 smp_wmb();
1470
1471 return -EINVAL;
1472 }
1473
1474#ifdef BCM_CNIC
1475 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1476#endif
1477 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1478
1479 /* Set "drop all" */
1480 bp->rx_mode = BNX2X_RX_MODE_NONE;
1481 bnx2x_set_storm_rx_mode(bp);
1482
1483 /* Disable HW interrupts, NAPI and Tx */
1484 bnx2x_netif_stop(bp, 1);
1485 netif_carrier_off(bp->dev);
1486
1487 del_timer_sync(&bp->timer);
1488 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
1489 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1490 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1491
1492 /* Release IRQs */
1493 bnx2x_free_irq(bp, false);
1494
1495 /* Cleanup the chip if needed */
1496 if (unload_mode != UNLOAD_RECOVERY)
1497 bnx2x_chip_cleanup(bp, unload_mode);
1498
1499 bp->port.pmf = 0;
1500
1501 /* Free SKBs, SGEs, TPA pool and driver internals */
1502 bnx2x_free_skbs(bp);
1503 for_each_queue(bp, i)
1504 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1505 for_each_queue(bp, i)
1506 netif_napi_del(&bnx2x_fp(bp, i, napi));
1507 bnx2x_free_mem(bp);
1508
1509 bp->state = BNX2X_STATE_CLOSED;
1510
1511	/* The last driver must disable the "close the gate" functionality if there is no
1512 * parity attention or "process kill" pending.
1513 */
1514 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1515 bnx2x_reset_is_done(bp))
1516 bnx2x_disable_close_the_gate(bp);
1517
1518	/* Reset the MCP mailbox sequence if there is an ongoing recovery */
1519 if (unload_mode == UNLOAD_RECOVERY)
1520 bp->fw_seq = 0;
1521
1522 return 0;
1523}
1524int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1525{
1526 u16 pmcsr;
1527
1528 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1529
1530 switch (state) {
1531 case PCI_D0:
1532 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1533 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1534 PCI_PM_CTRL_PME_STATUS));
1535
1536 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1537 /* delay required during transition out of D3hot */
1538 msleep(20);
1539 break;
1540
1541 case PCI_D3hot:
1542		/* If there are other clients above, don't
1543 shut down the power */
1544 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1545 return 0;
1546 /* Don't shut down the power for emulation and FPGA */
1547 if (CHIP_REV_IS_SLOW(bp))
1548 return 0;
1549
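		/* Request D3hot: power state field value 3 in PMCSR */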
1550 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1551 pmcsr |= 3;
1552
1553 if (bp->wol)
1554 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1555
1556 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1557 pmcsr);
1558
1559 /* No more memory access after this point until
1560 * device is brought back to D0.
1561 */
1562 break;
1563
1564 default:
1565 return -EINVAL;
1566 }
1567 return 0;
1568}
1569
1570
1571
1572/*
1573 * net_device service functions
1574 */
1575
1576static int bnx2x_poll(struct napi_struct *napi, int budget)
1577{
1578 int work_done = 0;
1579 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1580 napi);
1581 struct bnx2x *bp = fp->bp;
1582
1583 while (1) {
1584#ifdef BNX2X_STOP_ON_ERROR
1585 if (unlikely(bp->panic)) {
1586 napi_complete(napi);
1587 return 0;
1588 }
1589#endif
1590
1591 if (bnx2x_has_tx_work(fp))
1592 bnx2x_tx_int(fp);
1593
1594 if (bnx2x_has_rx_work(fp)) {
1595 work_done += bnx2x_rx_int(fp, budget - work_done);
1596
1597 /* must not complete if we consumed full budget */
1598 if (work_done >= budget)
1599 break;
1600 }
1601
1602 /* Fall out from the NAPI loop if needed */
1603 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1604 bnx2x_update_fpsb_idx(fp);
1605 /* bnx2x_has_rx_work() reads the status block, thus we need
1606 * to ensure that status block indices have been actually read
1607 * (bnx2x_update_fpsb_idx) prior to this check
1608 * (bnx2x_has_rx_work) so that we won't write the "newer"
1609 * value of the status block to IGU (if there was a DMA right
1610 * after bnx2x_has_rx_work and if there is no rmb, the memory
1611 * reading (bnx2x_update_fpsb_idx) may be postponed to right
1612 * before bnx2x_ack_sb). In this case there will never be
1613 * another interrupt until there is another update of the
1614 * status block, while there is still unhandled work.
1615 */
1616 rmb();
1617
1618 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1619 napi_complete(napi);
1620 /* Re-enable interrupts */
1621 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1622 le16_to_cpu(fp->fp_c_idx),
1623 IGU_INT_NOP, 1);
1624 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1625 le16_to_cpu(fp->fp_u_idx),
1626 IGU_INT_ENABLE, 1);
1627 break;
1628 }
1629 }
1630 }
1631
1632 return work_done;
1633}
1634
1635
1636/* We split the first BD into header and data BDs
1637 * to ease the pain of our fellow microcode engineers;
1638 * we use one mapping for both BDs.
1639 * So far this has only been observed to happen
1640 * in Other Operating Systems(TM).
1641 */
1642static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1643 struct bnx2x_fastpath *fp,
1644 struct sw_tx_bd *tx_buf,
1645 struct eth_tx_start_bd **tx_bd, u16 hlen,
1646 u16 bd_prod, int nbd)
1647{
1648 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1649 struct eth_tx_bd *d_tx_bd;
1650 dma_addr_t mapping;
1651 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1652
1653 /* first fix first BD */
1654 h_tx_bd->nbd = cpu_to_le16(nbd);
1655 h_tx_bd->nbytes = cpu_to_le16(hlen);
1656
1657 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1658 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1659 h_tx_bd->addr_lo, h_tx_bd->nbd);
1660
1661 /* now get a new data BD
1662 * (after the pbd) and fill it */
1663 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1664 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1665
1666 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1667 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1668
1669 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1670 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1671 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1672
1673 /* this marks the BD as one that has no individual mapping */
1674 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1675
1676 DP(NETIF_MSG_TX_QUEUED,
1677 "TSO split data size is %d (%x:%x)\n",
1678 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1679
1680 /* update tx_bd */
1681 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1682
1683 return bd_prod;
1684}
1685
1686static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1687{
1688 if (fix > 0)
1689 csum = (u16) ~csum_fold(csum_sub(csum,
1690 csum_partial(t_header - fix, fix, 0)));
1691
1692 else if (fix < 0)
1693 csum = (u16) ~csum_fold(csum_add(csum,
1694 csum_partial(t_header, -fix, 0)));
1695
1696 return swab16(csum);
1697}
1698
1699static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1700{
1701 u32 rc;
1702
1703 if (skb->ip_summed != CHECKSUM_PARTIAL)
1704 rc = XMIT_PLAIN;
1705
1706 else {
1707 if (skb->protocol == htons(ETH_P_IPV6)) {
1708 rc = XMIT_CSUM_V6;
1709 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1710 rc |= XMIT_CSUM_TCP;
1711
1712 } else {
1713 rc = XMIT_CSUM_V4;
1714 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1715 rc |= XMIT_CSUM_TCP;
1716 }
1717 }
1718
1719 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1720 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1721
1722 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1723 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1724
1725 return rc;
1726}
1727
1728#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1729/* Check if a packet requires linearization (i.e. it is too fragmented).
1730   There is no need to check fragmentation if the page size > 8K (there will
1731   be no violation of FW restrictions). */
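/* The check below implements the FW restriction that any MSS-worth of data
 * must fit in at most wnd_size = MAX_FETCH_BD - 3 BDs: a window of wnd_size
 * fragments is slid over the skb and linearization is requested if any
 * window sums to less than the LSO MSS.
 */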
1732static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1733 u32 xmit_type)
1734{
1735 int to_copy = 0;
1736 int hlen = 0;
1737 int first_bd_sz = 0;
1738
1739 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1740 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1741
1742 if (xmit_type & XMIT_GSO) {
1743 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1744 /* Check if LSO packet needs to be copied:
1745 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1746 int wnd_size = MAX_FETCH_BD - 3;
1747 /* Number of windows to check */
1748 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1749 int wnd_idx = 0;
1750 int frag_idx = 0;
1751 u32 wnd_sum = 0;
1752
1753 /* Headers length */
1754 hlen = (int)(skb_transport_header(skb) - skb->data) +
1755 tcp_hdrlen(skb);
1756
1757 /* Amount of data (w/o headers) on linear part of SKB*/
1758 first_bd_sz = skb_headlen(skb) - hlen;
1759
1760 wnd_sum = first_bd_sz;
1761
1762 /* Calculate the first sum - it's special */
1763 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1764 wnd_sum +=
1765 skb_shinfo(skb)->frags[frag_idx].size;
1766
1767 /* If there was data on linear skb data - check it */
1768 if (first_bd_sz > 0) {
1769 if (unlikely(wnd_sum < lso_mss)) {
1770 to_copy = 1;
1771 goto exit_lbl;
1772 }
1773
1774 wnd_sum -= first_bd_sz;
1775 }
1776
1777 /* Others are easier: run through the frag list and
1778 check all windows */
1779 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1780 wnd_sum +=
1781 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1782
1783 if (unlikely(wnd_sum < lso_mss)) {
1784 to_copy = 1;
1785 break;
1786 }
1787 wnd_sum -=
1788 skb_shinfo(skb)->frags[wnd_idx].size;
1789 }
1790 } else {
1791			/* a non-LSO packet that is too fragmented
1792			   should always be linearized */
1793 to_copy = 1;
1794 }
1795 }
1796
1797exit_lbl:
1798 if (unlikely(to_copy))
1799 DP(NETIF_MSG_TX_QUEUED,
1800 "Linearization IS REQUIRED for %s packet. "
1801 "num_frags %d hlen %d first_bd_sz %d\n",
1802 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1803 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1804
1805 return to_copy;
1806}
1807#endif
1808
1809/* called with netif_tx_lock
1810 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1811 * netif_wake_queue()
1812 */
1813netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1814{
1815 struct bnx2x *bp = netdev_priv(dev);
1816 struct bnx2x_fastpath *fp;
1817 struct netdev_queue *txq;
1818 struct sw_tx_bd *tx_buf;
1819 struct eth_tx_start_bd *tx_start_bd;
1820 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1821 struct eth_tx_parse_bd *pbd = NULL;
1822 u16 pkt_prod, bd_prod;
1823 int nbd, fp_index;
1824 dma_addr_t mapping;
1825 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1826 int i;
1827 u8 hlen = 0;
1828 __le16 pkt_size = 0;
1829 struct ethhdr *eth;
1830 u8 mac_type = UNICAST_ADDRESS;
1831
1832#ifdef BNX2X_STOP_ON_ERROR
1833 if (unlikely(bp->panic))
1834 return NETDEV_TX_BUSY;
1835#endif
1836
1837 fp_index = skb_get_queue_mapping(skb);
1838 txq = netdev_get_tx_queue(dev, fp_index);
1839
1840 fp = &bp->fp[fp_index];
1841
1842 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1843 fp->eth_q_stats.driver_xoff++;
1844 netif_tx_stop_queue(txq);
1845 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1846 return NETDEV_TX_BUSY;
1847 }
1848
1849 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
1850 " gso type %x xmit_type %x\n",
1851 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1852 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1853
1854 eth = (struct ethhdr *)skb->data;
1855
1856	/* set flag according to packet type (UNICAST_ADDRESS is default) */
1857 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1858 if (is_broadcast_ether_addr(eth->h_dest))
1859 mac_type = BROADCAST_ADDRESS;
1860 else
1861 mac_type = MULTICAST_ADDRESS;
1862 }
1863
1864#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1865 /* First, check if we need to linearize the skb (due to FW
1866 restrictions). No need to check fragmentation if page size > 8K
1867 (there will be no violation to FW restrictions) */
1868 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1869 /* Statistics of linearization */
1870 bp->lin_cnt++;
1871 if (skb_linearize(skb) != 0) {
1872 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1873 "silently dropping this SKB\n");
1874 dev_kfree_skb_any(skb);
1875 return NETDEV_TX_OK;
1876 }
1877 }
1878#endif
1879
1880 /*
1881 Please read carefully. First we use one BD which we mark as start,
1882 then we have a parsing info BD (used for TSO or xsum),
1883 and only then we have the rest of the TSO BDs.
1884 (don't forget to mark the last one as last,
1885 and to unmap only AFTER you write to the BD ...)
1886	   And above all, all pbd sizes are in words - NOT DWORDS!
1887 */
1888
1889 pkt_prod = fp->tx_pkt_prod++;
1890 bd_prod = TX_BD(fp->tx_bd_prod);
1891
1892 /* get a tx_buf and first BD */
1893 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
1894 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
1895
1896 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1897 tx_start_bd->general_data = (mac_type <<
1898 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
1899 /* header nbd */
1900 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
1901
1902 /* remember the first BD of the packet */
1903 tx_buf->first_bd = fp->tx_bd_prod;
1904 tx_buf->skb = skb;
1905 tx_buf->flags = 0;
1906
1907 DP(NETIF_MSG_TX_QUEUED,
1908 "sending pkt %u @%p next_idx %u bd %u @%p\n",
1909 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
1910
1911#ifdef BCM_VLAN
1912 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
1913 (bp->flags & HW_VLAN_TX_FLAG)) {
1914 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
1915 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
1916 } else
1917#endif
1918 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
1919
1920 /* turn on parsing and get a BD */
1921 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1922 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
1923
1924 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
1925
1926 if (xmit_type & XMIT_CSUM) {
1927 hlen = (skb_network_header(skb) - skb->data) / 2;
1928
1929 /* for now NS flag is not used in Linux */
1930 pbd->global_data =
1931 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1932 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
1933
1934 pbd->ip_hlen = (skb_transport_header(skb) -
1935 skb_network_header(skb)) / 2;
1936
1937 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
1938
1939 pbd->total_hlen = cpu_to_le16(hlen);
1940 hlen = hlen*2;
1941
1942 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
1943
1944 if (xmit_type & XMIT_CSUM_V4)
1945 tx_start_bd->bd_flags.as_bitfield |=
1946 ETH_TX_BD_FLAGS_IP_CSUM;
1947 else
1948 tx_start_bd->bd_flags.as_bitfield |=
1949 ETH_TX_BD_FLAGS_IPV6;
1950
1951 if (xmit_type & XMIT_CSUM_TCP) {
1952 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1953
1954 } else {
1955 s8 fix = SKB_CS_OFF(skb); /* signed! */
1956
1957 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
1958
1959 DP(NETIF_MSG_TX_QUEUED,
1960 "hlen %d fix %d csum before fix %x\n",
1961 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
1962
1963 /* HW bug: fixup the CSUM */
1964 pbd->tcp_pseudo_csum =
1965 bnx2x_csum_fix(skb_transport_header(skb),
1966 SKB_CS(skb), fix);
1967
1968 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1969 pbd->tcp_pseudo_csum);
1970 }
1971 }
1972
1973 mapping = dma_map_single(&bp->pdev->dev, skb->data,
1974 skb_headlen(skb), DMA_TO_DEVICE);
1975
1976 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1977 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1978 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
1979 tx_start_bd->nbd = cpu_to_le16(nbd);
1980 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
1981 pkt_size = tx_start_bd->nbytes;
1982
1983 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
1984 " nbytes %d flags %x vlan %x\n",
1985 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
1986 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
1987 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
1988
1989 if (xmit_type & XMIT_GSO) {
1990
1991 DP(NETIF_MSG_TX_QUEUED,
1992 "TSO packet len %d hlen %d total len %d tso size %d\n",
1993 skb->len, hlen, skb_headlen(skb),
1994 skb_shinfo(skb)->gso_size);
1995
1996 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
1997
1998 if (unlikely(skb_headlen(skb) > hlen))
1999 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2000 hlen, bd_prod, ++nbd);
2001
2002 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2003 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2004 pbd->tcp_flags = pbd_tcp_flags(skb);
2005
2006 if (xmit_type & XMIT_GSO_V4) {
2007 pbd->ip_id = swab16(ip_hdr(skb)->id);
2008 pbd->tcp_pseudo_csum =
2009 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2010 ip_hdr(skb)->daddr,
2011 0, IPPROTO_TCP, 0));
2012
2013 } else
2014 pbd->tcp_pseudo_csum =
2015 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2016 &ipv6_hdr(skb)->daddr,
2017 0, IPPROTO_TCP, 0));
2018
2019 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
2020 }
2021 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2022
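	/* Map each page fragment into its own data BD; remember the first
	 * such BD so that total_pkt_bytes can be written into it once the
	 * full packet size is known.
	 */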
2023 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2024 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2025
2026 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2027 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2028 if (total_pkt_bd == NULL)
2029 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2030
2031 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2032 frag->page_offset,
2033 frag->size, DMA_TO_DEVICE);
2034
2035 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2036 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2037 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2038 le16_add_cpu(&pkt_size, frag->size);
2039
2040 DP(NETIF_MSG_TX_QUEUED,
2041 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2042 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2043 le16_to_cpu(tx_data_bd->nbytes));
2044 }
2045
2046 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2047
2048 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2049
2050 /* now send a tx doorbell, counting the next BD
2051 * if the packet contains or ends with it
2052 */
2053 if (TX_BD_POFF(bd_prod) < nbd)
2054 nbd++;
2055
2056 if (total_pkt_bd != NULL)
2057 total_pkt_bd->total_pkt_bytes = pkt_size;
2058
2059 if (pbd)
2060 DP(NETIF_MSG_TX_QUEUED,
2061 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2062 " tcp_flags %x xsum %x seq %u hlen %u\n",
2063 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
2064 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
2065 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
2066
2067 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2068
2069 /*
2070 * Make sure that the BD data is updated before updating the producer
2071 * since FW might read the BD right after the producer is updated.
2072 * This is only applicable for weak-ordered memory model archs such
2073	 * as IA-64. The following barrier is also mandatory since the FW
2074	 * assumes packets must have BDs.
2075 */
2076 wmb();
2077
2078 fp->tx_db.data.prod += nbd;
2079 barrier();
2080 DOORBELL(bp, fp->index, fp->tx_db.raw);
2081
2082 mmiowb();
2083
2084 fp->tx_bd_prod += nbd;
2085
2086 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2087 netif_tx_stop_queue(txq);
2088
2089 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2090 * ordering of set_bit() in netif_tx_stop_queue() and read of
2091		 * fp->tx_bd_cons */
2092 smp_mb();
2093
2094 fp->eth_q_stats.driver_xoff++;
2095 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2096 netif_tx_wake_queue(txq);
2097 }
2098 fp->tx_pkt++;
2099
2100 return NETDEV_TX_OK;
2101}
2102/* called with rtnl_lock */
2103int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2104{
2105 struct sockaddr *addr = p;
2106 struct bnx2x *bp = netdev_priv(dev);
2107
2108 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2109 return -EINVAL;
2110
2111 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2112 if (netif_running(dev)) {
2113 if (CHIP_IS_E1(bp))
2114 bnx2x_set_eth_mac_addr_e1(bp, 1);
2115 else
2116 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2117 }
2118
2119 return 0;
2120}
2121
2122/* called with rtnl_lock */
2123int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2124{
2125 struct bnx2x *bp = netdev_priv(dev);
2126 int rc = 0;
2127
2128 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2129 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2130 return -EAGAIN;
2131 }
2132
2133 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2134 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2135 return -EINVAL;
2136
2137 /* This does not race with packet allocation
2138 * because the actual alloc size is
2139 * only updated as part of load
2140 */
2141 dev->mtu = new_mtu;
2142
2143 if (netif_running(dev)) {
2144 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2145 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2146 }
2147
2148 return rc;
2149}
2150
2151void bnx2x_tx_timeout(struct net_device *dev)
2152{
2153 struct bnx2x *bp = netdev_priv(dev);
2154
2155#ifdef BNX2X_STOP_ON_ERROR
2156 if (!bp->panic)
2157 bnx2x_panic();
2158#endif
2159	/* This allows the netif to be shut down gracefully before resetting */
2160 schedule_delayed_work(&bp->reset_task, 0);
2161}
2162
2163#ifdef BCM_VLAN
2164/* called with rtnl_lock */
2165void bnx2x_vlan_rx_register(struct net_device *dev,
2166 struct vlan_group *vlgrp)
2167{
2168 struct bnx2x *bp = netdev_priv(dev);
2169
2170 bp->vlgrp = vlgrp;
2171
2172 /* Set flags according to the required capabilities */
2173 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
2174
2175 if (dev->features & NETIF_F_HW_VLAN_TX)
2176 bp->flags |= HW_VLAN_TX_FLAG;
2177
2178 if (dev->features & NETIF_F_HW_VLAN_RX)
2179 bp->flags |= HW_VLAN_RX_FLAG;
2180
2181 if (netif_running(dev))
2182 bnx2x_set_client_config(bp);
2183}
2184
2185#endif
2186int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2187{
2188 struct net_device *dev = pci_get_drvdata(pdev);
2189 struct bnx2x *bp;
2190
2191 if (!dev) {
2192 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2193 return -ENODEV;
2194 }
2195 bp = netdev_priv(dev);
2196
2197 rtnl_lock();
2198
2199 pci_save_state(pdev);
2200
2201 if (!netif_running(dev)) {
2202 rtnl_unlock();
2203 return 0;
2204 }
2205
2206 netif_device_detach(dev);
2207
2208 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2209
2210 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2211
2212 rtnl_unlock();
2213
2214 return 0;
2215}
2216
2217int bnx2x_resume(struct pci_dev *pdev)
2218{
2219 struct net_device *dev = pci_get_drvdata(pdev);
2220 struct bnx2x *bp;
2221 int rc;
2222
2223 if (!dev) {
2224 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2225 return -ENODEV;
2226 }
2227 bp = netdev_priv(dev);
2228
2229 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2230 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2231 return -EAGAIN;
2232 }
2233
2234 rtnl_lock();
2235
2236 pci_restore_state(pdev);
2237
2238 if (!netif_running(dev)) {
2239 rtnl_unlock();
2240 return 0;
2241 }
2242
2243 bnx2x_set_power_state(bp, PCI_D0);
2244 netif_device_attach(dev);
2245
2246 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2247
2248 rtnl_unlock();
2249
2250 return rc;
2251}
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
new file mode 100644
index 00000000000..d1979b1a7ed
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -0,0 +1,652 @@
1/* bnx2x_cmn.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17#ifndef BNX2X_CMN_H
18#define BNX2X_CMN_H
19
20#include <linux/types.h>
21#include <linux/netdevice.h>
22
23
24#include "bnx2x.h"
25
26
27/*********************** Interfaces ****************************
28 * Functions that need to be implemented by each driver version
29 */
30
31/**
32 * Initialize link parameters structure variables.
33 *
34 * @param bp
35 * @param load_mode
36 *
37 * @return u8
38 */
39u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
40
41/**
42 * Configure hw according to link parameters structure.
43 *
44 * @param bp
45 */
46void bnx2x_link_set(struct bnx2x *bp);
47
48/**
49 * Query link status
50 *
51 * @param bp
52 *
53 * @return 0 - link is UP
54 */
55u8 bnx2x_link_test(struct bnx2x *bp);
56
57/**
58 * Handles link status change
59 *
60 * @param bp
61 */
62void bnx2x__link_status_update(struct bnx2x *bp);
63
64/**
65 * MSI-X slowpath interrupt handler
66 *
67 * @param irq
68 * @param dev_instance
69 *
70 * @return irqreturn_t
71 */
72irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
73
74/**
75 * non MSI-X interrupt handler
76 *
77 * @param irq
78 * @param dev_instance
79 *
80 * @return irqreturn_t
81 */
82irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
83#ifdef BCM_CNIC
84
85/**
86 * Send command to cnic driver
87 *
88 * @param bp
89 * @param cmd
90 */
91int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
92
93/**
94 * Provides cnic information for proper interrupt handling
95 *
96 * @param bp
97 */
98void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
99#endif
100
101/**
102 * Enable HW interrupts.
103 *
104 * @param bp
105 */
106void bnx2x_int_enable(struct bnx2x *bp);
107
108/**
109 * Disable interrupts. This function ensures that no
110 * ISRs or SP DPCs (sp_task) are running after it returns.
111 *
112 * @param bp
113 * @param disable_hw if true, disable HW interrupts.
114 */
115void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
116
117/**
118 * Init HW blocks according to current initialization stage:
119 * COMMON, PORT or FUNCTION.
120 *
121 * @param bp
122 * @param load_code: COMMON, PORT or FUNCTION
123 *
124 * @return int
125 */
126int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
127
128/**
129 * Init driver internals:
130 * - rings
131 * - status blocks
132 * - etc.
133 *
134 * @param bp
135 * @param load_code COMMON, PORT or FUNCTION
136 */
137void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
138
139/**
140 * Allocate driver's memory.
141 *
142 * @param bp
143 *
144 * @return int
145 */
146int bnx2x_alloc_mem(struct bnx2x *bp);
147
148/**
149 * Release driver's memory.
150 *
151 * @param bp
152 */
153void bnx2x_free_mem(struct bnx2x *bp);
154
155/**
156 * Bring up a leading (the first) eth Client.
157 *
158 * @param bp
159 *
160 * @return int
161 */
162int bnx2x_setup_leading(struct bnx2x *bp);
163
164/**
165 * Setup non-leading eth Client.
166 *
167 * @param bp
168 * @param index
169 *
170 * @return int
171 */
172int bnx2x_setup_multi(struct bnx2x *bp, int index);
173
174/**
175 * Set the number of queues according to mode and the number of
176 * available MSI-X vectors.
177 *
178 * @param bp
179 *
180 */
181void bnx2x_set_num_queues_msix(struct bnx2x *bp);
182
183/**
184 * Cleanup chip internals:
185 * - Cleanup MAC configuration.
186 * - Close clients.
187 * - etc.
188 *
189 * @param bp
190 * @param unload_mode
191 */
192void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
193
194/**
195 * Acquire HW lock.
196 *
197 * @param bp
198 * @param resource Resource bit to be locked
199 *
200 * @return int
201 */
202int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
203
204/**
205 * Release HW lock.
206 *
207 * @param bp driver handle
208 * @param resource Resource bit which was locked
209 *
210 * @return int
211 */
212int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
213
214/**
215 * Configure eth MAC address in the HW according to the value in
216 * netdev->dev_addr for 57711
217 *
218 * @param bp driver handle
219 * @param set
220 */
221void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
222
223/**
224 * Configure eth MAC address in the HW according to the value in
225 * netdev->dev_addr for 57710
226 *
227 * @param bp driver handle
228 * @param set
229 */
230void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set);
231
232#ifdef BCM_CNIC
233/**
234 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
235 * MAC(s). The function will wait until the ramrod completion
236 * returns.
237 *
238 * @param bp driver handle
239 * @param set set or clear the CAM entry
240 *
241 * @return 0 on success, -ENODEV if the ramrod doesn't return.
242 */
243int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
244#endif
245
246/**
247 * Initialize status block in FW and HW
248 *
249 * @param bp driver handle
250 * @param sb host_status_block
251 * @param mapping dma_addr_t
252 * @param sb_id int
253 */
254void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
255 dma_addr_t mapping, int sb_id);
256
257/**
258 * Reconfigure FW/HW according to dev->flags rx mode
259 *
260 * @param dev net_device
261 *
262 */
263void bnx2x_set_rx_mode(struct net_device *dev);
264
265/**
266 * Configure MAC filtering rules in a FW.
267 *
268 * @param bp driver handle
269 */
270void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
271
272/* Parity errors related */
273void bnx2x_inc_load_cnt(struct bnx2x *bp);
274u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
275bool bnx2x_chk_parity_attn(struct bnx2x *bp);
276bool bnx2x_reset_is_done(struct bnx2x *bp);
277void bnx2x_disable_close_the_gate(struct bnx2x *bp);
278
279/**
280 * Perform statistics handling according to event
281 *
282 * @param bp driver handle
283 * @param event bnx2x_stats_event
284 */
285void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
286
287/**
288 * Configures FW with client parameters (like HW VLAN removal)
289 * for each active client.
290 *
291 * @param bp
292 */
293void bnx2x_set_client_config(struct bnx2x *bp);
294
295/**
296 * Handle sp events
297 *
298 * @param fp fastpath handle for the event
299 * @param rr_cqe eth_rx_cqe
300 */
301void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
302
303
304static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
305{
306 struct host_status_block *fpsb = fp->status_blk;
307
308 barrier(); /* status block is written to by the chip */
309 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
310 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
311}
312
313static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
314 struct bnx2x_fastpath *fp,
315 u16 bd_prod, u16 rx_comp_prod,
316 u16 rx_sge_prod)
317{
318 struct ustorm_eth_rx_producers rx_prods = {0};
319 int i;
320
321 /* Update producers */
322 rx_prods.bd_prod = bd_prod;
323 rx_prods.cqe_prod = rx_comp_prod;
324 rx_prods.sge_prod = rx_sge_prod;
325
326 /*
327 * Make sure that the BD and SGE data is updated before updating the
328 * producers since FW might read the BD/SGE right after the producer
329 * is updated.
330 * This is only applicable for weak-ordered memory model archs such
331	 * as IA-64. The following barrier is also mandatory since the FW
332	 * assumes BDs must have buffers.
333 */
334 wmb();
335
336 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
337 REG_WR(bp, BAR_USTRORM_INTMEM +
338 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
339 ((u32 *)&rx_prods)[i]);
340
341 mmiowb(); /* keep prod updates ordered */
342
343 DP(NETIF_MSG_RX_STATUS,
344 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
345 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
346}
347
348
349
350static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
351 u8 storm, u16 index, u8 op, u8 update)
352{
353 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
354 COMMAND_REG_INT_ACK);
355 struct igu_ack_register igu_ack;
356
357 igu_ack.status_block_index = index;
358 igu_ack.sb_id_and_flags =
359 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
360 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
361 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
362 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
363
364 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
365 (*(u32 *)&igu_ack), hc_addr);
366 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
367
368 /* Make sure that ACK is written */
369 mmiowb();
370 barrier();
371}
372static inline u16 bnx2x_ack_int(struct bnx2x *bp)
373{
374 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
375 COMMAND_REG_SIMD_MASK);
376 u32 result = REG_RD(bp, hc_addr);
377
378 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
379 result, hc_addr);
380
381 return result;
382}
383
384/*
385 * fast path service functions
386 */
387
388static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
389{
390 /* Tell compiler that consumer and producer can change */
391 barrier();
392 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
393}
394
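/* Number of BDs still available on the Tx ring; the "next page" entries
 * (one per ring page, NUM_TX_RINGS in total) are counted as permanently
 * used.
 */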
395static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
396{
397 s16 used;
398 u16 prod;
399 u16 cons;
400
401 prod = fp->tx_bd_prod;
402 cons = fp->tx_bd_cons;
403
404	/* NUM_TX_RINGS = number of "next-page" entries;
405	   it will be used as a threshold */
406 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
407
408#ifdef BNX2X_STOP_ON_ERROR
409 WARN_ON(used < 0);
410 WARN_ON(used > fp->bp->tx_ring_size);
411 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
412#endif
413
414 return (s16)(fp->bp->tx_ring_size) - used;
415}
416
417static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
418{
419 u16 hw_cons;
420
421 /* Tell compiler that status block fields can change */
422 barrier();
423 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
424 return hw_cons != fp->tx_pkt_cons;
425}
426
427static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
428 struct bnx2x_fastpath *fp, u16 index)
429{
430 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
431 struct page *page = sw_buf->page;
432 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
433
434 /* Skip "next page" elements */
435 if (!page)
436 return;
437
438 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
439 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
440 __free_pages(page, PAGES_PER_SGE_SHIFT);
441
442 sw_buf->page = NULL;
443 sge->addr_hi = 0;
444 sge->addr_lo = 0;
445}
446
447static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
448 struct bnx2x_fastpath *fp, int last)
449{
450 int i;
451
452 for (i = 0; i < last; i++)
453 bnx2x_free_rx_sge(bp, fp, i);
454}
455
456static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
457 struct bnx2x_fastpath *fp, u16 index)
458{
459 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
460 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
461 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
462 dma_addr_t mapping;
463
464 if (unlikely(page == NULL))
465 return -ENOMEM;
466
467 mapping = dma_map_page(&bp->pdev->dev, page, 0,
468 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
469 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
470 __free_pages(page, PAGES_PER_SGE_SHIFT);
471 return -ENOMEM;
472 }
473
474 sw_buf->page = page;
475 dma_unmap_addr_set(sw_buf, mapping, mapping);
476
477 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
478 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
479
480 return 0;
481}
482static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
483 struct bnx2x_fastpath *fp, u16 index)
484{
485 struct sk_buff *skb;
486 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
487 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
488 dma_addr_t mapping;
489
490 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
491 if (unlikely(skb == NULL))
492 return -ENOMEM;
493
494 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
495 DMA_FROM_DEVICE);
496 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
497 dev_kfree_skb(skb);
498 return -ENOMEM;
499 }
500
501 rx_buf->skb = skb;
502 dma_unmap_addr_set(rx_buf, mapping, mapping);
503
504 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
505 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
506
507 return 0;
508}
509
510/* Note that we are not allocating a new skb,
511 * we are just moving one from cons to prod;
512 * we are not creating a new mapping,
513 * so there is no need to check for dma_mapping_error().
514 */
515static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
516 struct sk_buff *skb, u16 cons, u16 prod)
517{
518 struct bnx2x *bp = fp->bp;
519 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
520 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
521 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
522 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
523
524 dma_sync_single_for_device(&bp->pdev->dev,
525 dma_unmap_addr(cons_rx_buf, mapping),
526 RX_COPY_THRESH, DMA_FROM_DEVICE);
527
528 prod_rx_buf->skb = cons_rx_buf->skb;
529 dma_unmap_addr_set(prod_rx_buf, mapping,
530 dma_unmap_addr(cons_rx_buf, mapping));
531 *prod_bd = *cons_bd;
532}
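/* In bnx2x_reuse_rx_skb() above, dma_sync_single_for_device() covers
 * only the first RX_COPY_THRESH bytes: presumably only that leading
 * region may have been touched by the CPU (e.g. when a short frame was
 * copied out of the buffer), so only it has to be handed back to the
 * device before the buffer is recycled to the producer ring.
 */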
533
534static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
535{
536 int i, j;
537
538 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
539 int idx = RX_SGE_CNT * i - 1;
540
541 for (j = 0; j < 2; j++) {
542 SGE_MASK_CLEAR_BIT(fp, idx);
543 idx--;
544 }
545 }
546}
547
548static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
549{
550 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
551 memset(fp->sge_mask, 0xff,
552 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
553
554	/* Clear the last two indices in each page (i.e. set them to 0):
555 these are the indices that correspond to the "next" element,
556 hence will never be indicated and should be removed from
557 the calculations. */
558 bnx2x_clear_sge_mask_next_elems(fp);
559}
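/* In other words, the SGE mask built above keeps one bit per SGE
 * entry, packed into u64 words (the >> RX_SGE_MASK_ELEM_SHIFT converts
 * an entry count into a word count).  All bits start as 1 ("may still
 * be indicated by hardware"); the two entries at the end of every SGE
 * page are the "next-page" pointers, so their bits are cleared up
 * front and never take part in the producer-update calculation.
 */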
560static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
561 struct bnx2x_fastpath *fp, int last)
562{
563 int i;
564
565 for (i = 0; i < last; i++) {
566 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
567 struct sk_buff *skb = rx_buf->skb;
568
569 if (skb == NULL) {
570 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
571 continue;
572 }
573
574 if (fp->tpa_state[i] == BNX2X_TPA_START)
575 dma_unmap_single(&bp->pdev->dev,
576 dma_unmap_addr(rx_buf, mapping),
577 bp->rx_buf_size, DMA_FROM_DEVICE);
578
579 dev_kfree_skb(skb);
580 rx_buf->skb = NULL;
581 }
582}
583
584
585static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
586{
587 int i, j;
588
589 for_each_queue(bp, j) {
590 struct bnx2x_fastpath *fp = &bp->fp[j];
591
592 for (i = 1; i <= NUM_TX_RINGS; i++) {
593 struct eth_tx_next_bd *tx_next_bd =
594 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
595
596 tx_next_bd->addr_hi =
597 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
598 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
599 tx_next_bd->addr_lo =
600 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
601 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
602 }
603
604 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
605 fp->tx_db.data.zero_fill1 = 0;
606 fp->tx_db.data.prod = 0;
607
608 fp->tx_pkt_prod = 0;
609 fp->tx_pkt_cons = 0;
610 fp->tx_bd_prod = 0;
611 fp->tx_bd_cons = 0;
612 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
613 fp->tx_pkt = 0;
614 }
615}
616static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
617{
618 u16 rx_cons_sb;
619
620 /* Tell compiler that status block fields can change */
621 barrier();
622 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
623 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
624 rx_cons_sb++;
625 return (fp->rx_comp_cons != rx_cons_sb);
626}
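/* The rx_cons_sb++ adjustment above skips the last slot of an RCQ
 * page: when the status-block consumer lands exactly on a
 * MAX_RCQ_DESC_CNT boundary that slot is the "next-page" element,
 * which carries no completion, so it is stepped over before comparing
 * against rx_comp_cons.
 */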
627
628/* HW Lock for shared dual port PHYs */
629void bnx2x_acquire_phy_lock(struct bnx2x *bp);
630void bnx2x_release_phy_lock(struct bnx2x *bp);
631
632void bnx2x_link_report(struct bnx2x *bp);
633int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
634int bnx2x_tx_int(struct bnx2x_fastpath *fp);
635void bnx2x_init_rx_rings(struct bnx2x *bp);
636netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
637
638int bnx2x_change_mac_addr(struct net_device *dev, void *p);
639void bnx2x_tx_timeout(struct net_device *dev);
640void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
641void bnx2x_netif_start(struct bnx2x *bp);
642void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
643void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
644int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
645int bnx2x_resume(struct pci_dev *pdev);
646void bnx2x_free_skbs(struct bnx2x *bp);
647int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
648int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
649int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
650int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
651
652#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
new file mode 100644
index 00000000000..3bb9a91bb3f
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -0,0 +1,534 @@
1/* bnx2x_dump.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9
10
11/* This struct holds a signature to ensure the dump returned from the driver
12 * matches the metadata file inserted into grc_dump.tcl.
13 * The signature consists of a time stamp, the diag version and the grc_dump version.
14 */
15
16#ifndef BNX2X_DUMP_H
17#define BNX2X_DUMP_H
18
19
20struct dump_sign {
21 u32 time_stamp;
22 u32 diag_ver;
23 u32 grc_dump_ver;
24};
25
26#define TSTORM_WAITP_ADDR 0x1b8a80
27#define CSTORM_WAITP_ADDR 0x238a80
28#define XSTORM_WAITP_ADDR 0x2b8a80
29#define USTORM_WAITP_ADDR 0x338a80
30#define TSTORM_CAM_MODE 0x1b1440
31
32#define RI_E1 0x1
33#define RI_E1H 0x2
34#define RI_ONLINE 0x100
35
36#define RI_E1_OFFLINE (RI_E1)
37#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
38#define RI_E1H_OFFLINE (RI_E1H)
39#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
40#define RI_ALL_OFFLINE (RI_E1 | RI_E1H)
41#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
42
43#define MAX_TIMER_PENDING 200
44#define TIMER_SCAN_DONT_CARE 0xFF
45
46
47struct dump_hdr {
48 u32 hdr_size; /* in dwords, excluding this field */
49 struct dump_sign dump_sign;
50 u32 xstorm_waitp;
51 u32 tstorm_waitp;
52 u32 ustorm_waitp;
53 u32 cstorm_waitp;
54 u16 info;
55 u8 idle_chk;
56 u8 reserved;
57};
58
59struct reg_addr {
60 u32 addr;
61 u32 size;
62 u16 info;
63};
64
65struct wreg_addr {
66 u32 addr;
67 u32 size;
68 u32 read_regs_count;
69 const u32 *read_regs;
70 u16 info;
71};
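/* Reading guide (informal): a reg_addr entry describes a plain window
 * of 'size' consecutive dwords starting at 'addr', while a wreg_addr
 * entry describes a "wide" register: for each of its 'size' slots the
 * dump stores the value at 'addr' plus one value from each of the
 * 'read_regs_count' companion registers, which is why
 * bnx2x_get_regs_len() in bnx2x_ethtool.c weighs these entries by
 * (1 + read_regs_count).  The 'info' flags (RI_*) select which chip
 * revisions an entry applies to and whether it is safe to read while
 * the device is online.
 */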
72
73
74#define REGS_COUNT 558
75static const struct reg_addr reg_addrs[REGS_COUNT] = {
76 { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
77 { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
78 { 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE },
79 { 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE },
80 { 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE },
81 { 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE },
82 { 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE },
83 { 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE },
84 { 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE },
85 { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE },
86 { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE },
87 { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE },
88 { 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE },
89 { 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE },
90 { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE },
91 { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE },
92 { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE },
93 { 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE },
94 { 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
95 { 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE },
96 { 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE },
97 { 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE },
98 { 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE },
99 { 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE },
100 { 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE },
101 { 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE },
102 { 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE },
103 { 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE },
104 { 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE },
105 { 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE },
106 { 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE },
107 { 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE },
108 { 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE },
109 { 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE },
110 { 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE },
111 { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE },
112 { 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE },
113 { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE },
114 { 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE },
115 { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE },
116 { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE },
117 { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE },
118 { 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE },
119 { 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE },
120 { 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE },
121 { 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE },
122 { 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
123 { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
124 { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
125 { 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE },
126 { 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE },
127 { 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE },
128 { 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE },
129 { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE },
130 { 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE },
131 { 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE },
132 { 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE },
133 { 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE },
134 { 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE },
135 { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
136 { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
137 { 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE },
138 { 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE },
139 { 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
140 { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
141 { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE },
142 { 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE },
143 { 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE },
144 { 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE },
145 { 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE },
146 { 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE },
147 { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE },
148 { 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE },
149 { 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE },
150 { 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE },
151 { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
152 { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
153 { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE },
154 { 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE },
155 { 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE },
156 { 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE },
157 { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
158 { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
159 { 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE },
160 { 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE },
161 { 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE },
162 { 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE },
163 { 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE },
164 { 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE },
165 { 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE },
166 { 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE },
167 { 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
168 { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE },
169 { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
170 { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
171 { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
172 { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
173 { 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE },
174 { 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE },
175 { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE },
176 { 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE },
177 { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
178 { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE },
179 { 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE },
180 { 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE },
181 { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
182 { 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE },
183 { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE },
184 { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE },
185 { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE },
186 { 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE },
187 { 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE },
188 { 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE },
189 { 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE },
190 { 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE },
191 { 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE },
192 { 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE },
193 { 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE },
194 { 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE },
195 { 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
196 { 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE },
197 { 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
198 { 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE },
199 { 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
200 { 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE },
201 { 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE },
202 { 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE },
203 { 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE },
204 { 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE },
205 { 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE },
206 { 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE },
207 { 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE },
208 { 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE },
209 { 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE },
210 { 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE },
211 { 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
212 { 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE },
213 { 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
214 { 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE },
215 { 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
216 { 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE },
217 { 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE },
218 { 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE },
219 { 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE },
220 { 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE },
221 { 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE },
222 { 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE },
223 { 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE },
224 { 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE },
225 { 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE },
226 { 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE },
227 { 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
228 { 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE },
229 { 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
230 { 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE },
231 { 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
232 { 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE },
233 { 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE },
234 { 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE },
235 { 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE },
236 { 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE },
237 { 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE },
238 { 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE },
239 { 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE },
240 { 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE },
241 { 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE },
242 { 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE },
243 { 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
244 { 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE },
245 { 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
246 { 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE },
247 { 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
248 { 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE },
249 { 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE },
250 { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE },
251 { 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE },
252 { 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE },
253 { 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE },
254 { 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE },
255 { 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE },
256 { 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE },
257 { 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
258 { 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE },
259 { 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE },
260 { 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
261 { 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE },
262 { 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE },
263 { 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE },
264 { 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
265 { 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE },
266 { 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE },
267 { 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE },
268 { 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
269 { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
270 { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
271 { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
272 { 0x164238, 1, RI_ALL_ONLINE }, { 0x164240, 1, RI_ALL_ONLINE },
273 { 0x164248, 1, RI_ALL_ONLINE }, { 0x164250, 1, RI_ALL_ONLINE },
274 { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
275 { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
276 { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
277 { 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE },
278 { 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE },
279 { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
280 { 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE },
281 { 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE },
282 { 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE },
283 { 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE },
284 { 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE },
285 { 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE },
286 { 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
287 { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE },
288 { 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE },
289 { 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE },
290 { 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE },
291 { 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE },
292 { 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE },
293 { 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE },
294 { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE },
295 { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE },
296 { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
297 { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE },
298 { 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE },
299 { 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE },
300 { 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE },
301 { 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE },
302 { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE },
303 { 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
304 { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
305 { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
306 { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE },
307 { 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE },
308 { 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE },
309 { 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE },
310 { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE },
311 { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE },
312 { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE },
313 { 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE },
314 { 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE },
315 { 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE },
316 { 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE },
317 { 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
318 { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE },
319 { 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE },
320 { 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE },
321 { 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE },
322 { 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE },
323 { 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE },
324 { 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE },
325 { 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE },
326 { 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE },
327 { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE },
328 { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE },
329 { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE },
330 { 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE },
331 { 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE },
332 { 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE },
333 { 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
334 { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE },
335 { 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE },
336 { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE },
337 { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE },
338 { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
339 { 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE },
340 { 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE },
341 { 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE },
342 { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
343 { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE },
344 { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
345 { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE },
346 { 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE },
347 { 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE },
348 { 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE },
349 { 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE },
350 { 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE },
351 { 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE },
352 { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE },
353 { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
354 { 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }
355};
356
357
358#define IDLE_REGS_COUNT 277
359static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
360 { 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE },
361 { 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
362 { 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE },
363 { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
364 { 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE },
365 { 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE },
366 { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE },
367 { 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE },
368 { 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE },
369 { 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE },
370 { 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE },
371 { 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE },
372 { 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE },
373 { 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE },
374 { 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE },
375 { 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE },
376 { 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE },
377 { 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE },
378 { 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE },
379 { 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE },
380 { 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE },
381 { 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE },
382 { 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE },
383 { 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE },
384 { 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE },
385 { 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE },
386 { 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE },
387 { 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE },
388 { 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE },
389 { 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE },
390 { 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE },
391 { 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE },
392 { 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE },
393 { 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE },
394 { 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE },
395 { 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE },
396 { 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE },
397 { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
398 { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
399 { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
400 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE },
401 { 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE },
402 { 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE },
403 { 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE },
404 { 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE },
405 { 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE },
406 { 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
407 { 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
408 { 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
409 { 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE },
410 { 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE },
411 { 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE },
412 { 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE },
413 { 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE },
414 { 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
415 { 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
416 { 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
417 { 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE },
418 { 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE },
419 { 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE },
420 { 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE },
421 { 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE },
422 { 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
423 { 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
424 { 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
425 { 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE },
426 { 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE },
427 { 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE },
428 { 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE },
429 { 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE },
430 { 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
431 { 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
432 { 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
433 { 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE },
434 { 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE },
435 { 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE },
436 { 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE },
437 { 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE },
438 { 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE },
439 { 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE },
440 { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
441 { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
442 { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
443 { 0x120848, 1, RI_ALL_ONLINE }, { 0x120850, 1, RI_ALL_ONLINE },
444 { 0x120858, 1, RI_ALL_ONLINE }, { 0x120860, 1, RI_ALL_ONLINE },
445 { 0x120868, 1, RI_ALL_ONLINE }, { 0x120870, 1, RI_ALL_ONLINE },
446 { 0x120878, 1, RI_ALL_ONLINE }, { 0x120880, 1, RI_ALL_ONLINE },
447 { 0x120888, 1, RI_ALL_ONLINE }, { 0x120890, 1, RI_ALL_ONLINE },
448 { 0x120898, 1, RI_ALL_ONLINE }, { 0x1208a0, 1, RI_ALL_ONLINE },
449 { 0x1208a8, 1, RI_ALL_ONLINE }, { 0x1208b0, 1, RI_ALL_ONLINE },
450 { 0x1208b8, 1, RI_ALL_ONLINE }, { 0x1208c0, 1, RI_ALL_ONLINE },
451 { 0x1208c8, 1, RI_ALL_ONLINE }, { 0x1208d0, 1, RI_ALL_ONLINE },
452 { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
453 { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
454 { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
455 { 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE },
456 { 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE },
457 { 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
458 { 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE },
459 { 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE },
460 { 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE },
461 { 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE },
462 { 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE },
463 { 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE },
464 { 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE },
465 { 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE },
466 { 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE },
467 { 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE },
468 { 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
469 { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
470 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
471 { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE },
472 { 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE },
473 { 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE },
474 { 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE },
475 { 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE },
476 { 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE },
477 { 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE },
478 { 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
479 { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
480 { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE },
481 { 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE },
482 { 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE },
483 { 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE },
484 { 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE },
485 { 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE },
486 { 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE },
487 { 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE },
488 { 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE },
489 { 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE },
490 { 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE },
491 { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE },
492 { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
493 { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE },
494 { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE },
495 { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE },
496 { 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
497 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
498 { 0x3380c0, 1, RI_ALL_ONLINE }
499};
500
501#define WREGS_COUNT_E1 1
502static const u32 read_reg_e1_0[] = { 0x1b1000 };
503
504static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = {
505 { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
506};
507
508
509#define WREGS_COUNT_E1H 1
510static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
511
512static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = {
513 { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE }
514};
515
516
517static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 };
518
519
520#define TIMER_REGS_COUNT_E1 2
521static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] =
522 { 0x164014, 0x164018 };
523static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] =
524 { 0x1640d0, 0x1640d4 };
525
526
527#define TIMER_REGS_COUNT_E1H 2
528static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] =
529 { 0x164014, 0x164018 };
530static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
531 { 0x1640d0, 0x1640d4 };
532
533
534#endif /* BNX2X_DUMP_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
new file mode 100644
index 00000000000..8b75b05e34c
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -0,0 +1,1971 @@
1/* bnx2x_ethtool.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17#include <linux/ethtool.h>
18#include <linux/netdevice.h>
19#include <linux/types.h>
20#include <linux/sched.h>
21#include <linux/crc32.h>
22
23
24#include "bnx2x.h"
25#include "bnx2x_cmn.h"
26#include "bnx2x_dump.h"
27
28
29static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
30{
31 struct bnx2x *bp = netdev_priv(dev);
32
33 cmd->supported = bp->port.supported;
34 cmd->advertising = bp->port.advertising;
35
36 if ((bp->state == BNX2X_STATE_OPEN) &&
37 !(bp->flags & MF_FUNC_DIS) &&
38 (bp->link_vars.link_up)) {
39 cmd->speed = bp->link_vars.line_speed;
40 cmd->duplex = bp->link_vars.duplex;
41 if (IS_E1HMF(bp)) {
42 u16 vn_max_rate;
43
44 vn_max_rate =
45 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
46 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
47 if (vn_max_rate < cmd->speed)
48 cmd->speed = vn_max_rate;
49 }
50 } else {
51 cmd->speed = -1;
52 cmd->duplex = -1;
53 }
54
55 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
56 u32 ext_phy_type =
57 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
58
59 switch (ext_phy_type) {
60 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
61 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
62 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
63 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
64 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
65 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
66 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
67 cmd->port = PORT_FIBRE;
68 break;
69
70 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
71 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
72 cmd->port = PORT_TP;
73 break;
74
75 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
76 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
77 bp->link_params.ext_phy_config);
78 break;
79
80 default:
81 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
82 bp->link_params.ext_phy_config);
83 break;
84 }
85 } else
86 cmd->port = PORT_TP;
87
88 cmd->phy_address = bp->mdio.prtad;
89 cmd->transceiver = XCVR_INTERNAL;
90
91 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
92 cmd->autoneg = AUTONEG_ENABLE;
93 else
94 cmd->autoneg = AUTONEG_DISABLE;
95
96 cmd->maxtxpkt = 0;
97 cmd->maxrxpkt = 0;
98
99 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
100 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
101 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
102 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
103 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
104 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
105 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
106
107 return 0;
108}
109
110static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
111{
112 struct bnx2x *bp = netdev_priv(dev);
113 u32 advertising;
114
115 if (IS_E1HMF(bp))
116 return 0;
117
118 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
119 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
120 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
121 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
122 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
123 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
124 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
125
126 if (cmd->autoneg == AUTONEG_ENABLE) {
127 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
128 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
129 return -EINVAL;
130 }
131
132 /* advertise the requested speed and duplex if supported */
133 cmd->advertising &= bp->port.supported;
134
135 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
136 bp->link_params.req_duplex = DUPLEX_FULL;
137 bp->port.advertising |= (ADVERTISED_Autoneg |
138 cmd->advertising);
139
140 } else { /* forced speed */
141 /* advertise the requested speed and duplex if supported */
142 switch (cmd->speed) {
143 case SPEED_10:
144 if (cmd->duplex == DUPLEX_FULL) {
145 if (!(bp->port.supported &
146 SUPPORTED_10baseT_Full)) {
147 DP(NETIF_MSG_LINK,
148 "10M full not supported\n");
149 return -EINVAL;
150 }
151
152 advertising = (ADVERTISED_10baseT_Full |
153 ADVERTISED_TP);
154 } else {
155 if (!(bp->port.supported &
156 SUPPORTED_10baseT_Half)) {
157 DP(NETIF_MSG_LINK,
158 "10M half not supported\n");
159 return -EINVAL;
160 }
161
162 advertising = (ADVERTISED_10baseT_Half |
163 ADVERTISED_TP);
164 }
165 break;
166
167 case SPEED_100:
168 if (cmd->duplex == DUPLEX_FULL) {
169 if (!(bp->port.supported &
170 SUPPORTED_100baseT_Full)) {
171 DP(NETIF_MSG_LINK,
172 "100M full not supported\n");
173 return -EINVAL;
174 }
175
176 advertising = (ADVERTISED_100baseT_Full |
177 ADVERTISED_TP);
178 } else {
179 if (!(bp->port.supported &
180 SUPPORTED_100baseT_Half)) {
181 DP(NETIF_MSG_LINK,
182 "100M half not supported\n");
183 return -EINVAL;
184 }
185
186 advertising = (ADVERTISED_100baseT_Half |
187 ADVERTISED_TP);
188 }
189 break;
190
191 case SPEED_1000:
192 if (cmd->duplex != DUPLEX_FULL) {
193 DP(NETIF_MSG_LINK, "1G half not supported\n");
194 return -EINVAL;
195 }
196
197 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
198 DP(NETIF_MSG_LINK, "1G full not supported\n");
199 return -EINVAL;
200 }
201
202 advertising = (ADVERTISED_1000baseT_Full |
203 ADVERTISED_TP);
204 break;
205
206 case SPEED_2500:
207 if (cmd->duplex != DUPLEX_FULL) {
208 DP(NETIF_MSG_LINK,
209 "2.5G half not supported\n");
210 return -EINVAL;
211 }
212
213 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
214 DP(NETIF_MSG_LINK,
215 "2.5G full not supported\n");
216 return -EINVAL;
217 }
218
219 advertising = (ADVERTISED_2500baseX_Full |
220 ADVERTISED_TP);
221 break;
222
223 case SPEED_10000:
224 if (cmd->duplex != DUPLEX_FULL) {
225 DP(NETIF_MSG_LINK, "10G half not supported\n");
226 return -EINVAL;
227 }
228
229 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
230 DP(NETIF_MSG_LINK, "10G full not supported\n");
231 return -EINVAL;
232 }
233
234 advertising = (ADVERTISED_10000baseT_Full |
235 ADVERTISED_FIBRE);
236 break;
237
238 default:
239 DP(NETIF_MSG_LINK, "Unsupported speed\n");
240 return -EINVAL;
241 }
242
243 bp->link_params.req_line_speed = cmd->speed;
244 bp->link_params.req_duplex = cmd->duplex;
245 bp->port.advertising = advertising;
246 }
247
248 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
249 DP_LEVEL " req_duplex %d advertising 0x%x\n",
250 bp->link_params.req_line_speed, bp->link_params.req_duplex,
251 bp->port.advertising);
252
253 if (netif_running(dev)) {
254 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
255 bnx2x_link_set(bp);
256 }
257
258 return 0;
259}
260
261#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
262#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
263
264static int bnx2x_get_regs_len(struct net_device *dev)
265{
266 struct bnx2x *bp = netdev_priv(dev);
267 int regdump_len = 0;
268 int i;
269
270 if (CHIP_IS_E1(bp)) {
271 for (i = 0; i < REGS_COUNT; i++)
272 if (IS_E1_ONLINE(reg_addrs[i].info))
273 regdump_len += reg_addrs[i].size;
274
275 for (i = 0; i < WREGS_COUNT_E1; i++)
276 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
277 regdump_len += wreg_addrs_e1[i].size *
278 (1 + wreg_addrs_e1[i].read_regs_count);
279
280 } else { /* E1H */
281 for (i = 0; i < REGS_COUNT; i++)
282 if (IS_E1H_ONLINE(reg_addrs[i].info))
283 regdump_len += reg_addrs[i].size;
284
285 for (i = 0; i < WREGS_COUNT_E1H; i++)
286 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
287 regdump_len += wreg_addrs_e1h[i].size *
288 (1 + wreg_addrs_e1h[i].read_regs_count);
289 }
290 regdump_len *= 4;
291 regdump_len += sizeof(struct dump_hdr);
292
293 return regdump_len;
294}
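/* Worked example (numbers taken from the tables in bnx2x_dump.h): on
 * an E1H device the first entry { 0x2000, 341, RI_ALL_ONLINE }
 * contributes 341 dwords, while the single wreg_addrs_e1h entry is
 * skipped because it is marked RI_E1H_OFFLINE and fails the
 * IS_E1H_ONLINE() test; the dword total is then multiplied by 4 and
 * sizeof(struct dump_hdr) is added for the header that
 * bnx2x_get_regs() writes first.
 */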
295
296static void bnx2x_get_regs(struct net_device *dev,
297 struct ethtool_regs *regs, void *_p)
298{
299 u32 *p = _p, i, j;
300 struct bnx2x *bp = netdev_priv(dev);
301 struct dump_hdr dump_hdr = {0};
302
303 regs->version = 0;
304 memset(p, 0, regs->len);
305
306 if (!netif_running(bp->dev))
307 return;
308
309 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
310 dump_hdr.dump_sign = dump_sign_all;
311 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
312 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
313 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
314 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
315 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
316
317 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
318 p += dump_hdr.hdr_size + 1;
319
320 if (CHIP_IS_E1(bp)) {
321 for (i = 0; i < REGS_COUNT; i++)
322 if (IS_E1_ONLINE(reg_addrs[i].info))
323 for (j = 0; j < reg_addrs[i].size; j++)
324 *p++ = REG_RD(bp,
325 reg_addrs[i].addr + j*4);
326
327 } else { /* E1H */
328 for (i = 0; i < REGS_COUNT; i++)
329 if (IS_E1H_ONLINE(reg_addrs[i].info))
330 for (j = 0; j < reg_addrs[i].size; j++)
331 *p++ = REG_RD(bp,
332 reg_addrs[i].addr + j*4);
333 }
334}
335
336#define PHY_FW_VER_LEN 10
337
338static void bnx2x_get_drvinfo(struct net_device *dev,
339 struct ethtool_drvinfo *info)
340{
341 struct bnx2x *bp = netdev_priv(dev);
342 u8 phy_fw_ver[PHY_FW_VER_LEN];
343
344 strcpy(info->driver, DRV_MODULE_NAME);
345 strcpy(info->version, DRV_MODULE_VERSION);
346
347 phy_fw_ver[0] = '\0';
348 if (bp->port.pmf) {
349 bnx2x_acquire_phy_lock(bp);
350 bnx2x_get_ext_phy_fw_version(&bp->link_params,
351 (bp->state != BNX2X_STATE_CLOSED),
352 phy_fw_ver, PHY_FW_VER_LEN);
353 bnx2x_release_phy_lock(bp);
354 }
355
356 strncpy(info->fw_version, bp->fw_ver, 32);
357 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
358 "bc %d.%d.%d%s%s",
359 (bp->common.bc_ver & 0xff0000) >> 16,
360 (bp->common.bc_ver & 0xff00) >> 8,
361 (bp->common.bc_ver & 0xff),
362 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
363 strcpy(info->bus_info, pci_name(bp->pdev));
364 info->n_stats = BNX2X_NUM_STATS;
365 info->testinfo_len = BNX2X_NUM_TESTS;
366 info->eedump_len = bp->common.flash_size;
367 info->regdump_len = bnx2x_get_regs_len(dev);
368}
369
370static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
371{
372 struct bnx2x *bp = netdev_priv(dev);
373
374 if (bp->flags & NO_WOL_FLAG) {
375 wol->supported = 0;
376 wol->wolopts = 0;
377 } else {
378 wol->supported = WAKE_MAGIC;
379 if (bp->wol)
380 wol->wolopts = WAKE_MAGIC;
381 else
382 wol->wolopts = 0;
383 }
384 memset(&wol->sopass, 0, sizeof(wol->sopass));
385}
386
387static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
388{
389 struct bnx2x *bp = netdev_priv(dev);
390
391 if (wol->wolopts & ~WAKE_MAGIC)
392 return -EINVAL;
393
394 if (wol->wolopts & WAKE_MAGIC) {
395 if (bp->flags & NO_WOL_FLAG)
396 return -EINVAL;
397
398 bp->wol = 1;
399 } else
400 bp->wol = 0;
401
402 return 0;
403}
404
405static u32 bnx2x_get_msglevel(struct net_device *dev)
406{
407 struct bnx2x *bp = netdev_priv(dev);
408
409 return bp->msg_enable;
410}
411
412static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
413{
414 struct bnx2x *bp = netdev_priv(dev);
415
416 if (capable(CAP_NET_ADMIN))
417 bp->msg_enable = level;
418}
419
420static int bnx2x_nway_reset(struct net_device *dev)
421{
422 struct bnx2x *bp = netdev_priv(dev);
423
424 if (!bp->port.pmf)
425 return 0;
426
427 if (netif_running(dev)) {
428 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
429 bnx2x_link_set(bp);
430 }
431
432 return 0;
433}
434
435static u32 bnx2x_get_link(struct net_device *dev)
436{
437 struct bnx2x *bp = netdev_priv(dev);
438
439 if (bp->flags & MF_FUNC_DIS)
440 return 0;
441
442 return bp->link_vars.link_up;
443}
444
445static int bnx2x_get_eeprom_len(struct net_device *dev)
446{
447 struct bnx2x *bp = netdev_priv(dev);
448
449 return bp->common.flash_size;
450}
451
452static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
453{
454 int port = BP_PORT(bp);
455 int count, i;
456 u32 val = 0;
457
458 /* adjust timeout for emulation/FPGA */
459 count = NVRAM_TIMEOUT_COUNT;
460 if (CHIP_REV_IS_SLOW(bp))
461 count *= 100;
462
463 /* request access to nvram interface */
464 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
465 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
466
467 for (i = 0; i < count*10; i++) {
468 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
469 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
470 break;
471
472 udelay(5);
473 }
474
475 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
476 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
477 return -EBUSY;
478 }
479
480 return 0;
481}
482
483static int bnx2x_release_nvram_lock(struct bnx2x *bp)
484{
485 int port = BP_PORT(bp);
486 int count, i;
487 u32 val = 0;
488
489 /* adjust timeout for emulation/FPGA */
490 count = NVRAM_TIMEOUT_COUNT;
491 if (CHIP_REV_IS_SLOW(bp))
492 count *= 100;
493
494 /* relinquish nvram interface */
495 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
496 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
497
498 for (i = 0; i < count*10; i++) {
499 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
500 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
501 break;
502
503 udelay(5);
504 }
505
506 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
507 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
508 return -EBUSY;
509 }
510
511 return 0;
512}
513
514static void bnx2x_enable_nvram_access(struct bnx2x *bp)
515{
516 u32 val;
517
518 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
519
520 /* enable both bits, even on read */
521 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
522 (val | MCPR_NVM_ACCESS_ENABLE_EN |
523 MCPR_NVM_ACCESS_ENABLE_WR_EN));
524}
525
526static void bnx2x_disable_nvram_access(struct bnx2x *bp)
527{
528 u32 val;
529
530 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
531
532 /* disable both bits, even after read */
533 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
534 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
535 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
536}
537
538static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
539 u32 cmd_flags)
540{
541 int count, i, rc;
542 u32 val;
543
544 /* build the command word */
545 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
546
547 /* need to clear DONE bit separately */
548 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
549
550 /* address of the NVRAM to read from */
551 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
552 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
553
554 /* issue a read command */
555 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
556
557 /* adjust timeout for emulation/FPGA */
558 count = NVRAM_TIMEOUT_COUNT;
559 if (CHIP_REV_IS_SLOW(bp))
560 count *= 100;
561
562 /* wait for completion */
563 *ret_val = 0;
564 rc = -EBUSY;
565 for (i = 0; i < count; i++) {
566 udelay(5);
567 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
568
569 if (val & MCPR_NVM_COMMAND_DONE) {
570 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
571			/* we read nvram data in cpu order,
572			 * but ethtool sees it as an array of bytes;
573 * converting to big-endian will do the work */
574 *ret_val = cpu_to_be32(val);
575 rc = 0;
576 break;
577 }
578 }
579
580 return rc;
581}
582
583static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
584 int buf_size)
585{
586 int rc;
587 u32 cmd_flags;
588 __be32 val;
589
590 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
591 DP(BNX2X_MSG_NVM,
592 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
593 offset, buf_size);
594 return -EINVAL;
595 }
596
597 if (offset + buf_size > bp->common.flash_size) {
598 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
599 " buf_size (0x%x) > flash_size (0x%x)\n",
600 offset, buf_size, bp->common.flash_size);
601 return -EINVAL;
602 }
603
604 /* request access to nvram interface */
605 rc = bnx2x_acquire_nvram_lock(bp);
606 if (rc)
607 return rc;
608
609 /* enable access to nvram interface */
610 bnx2x_enable_nvram_access(bp);
611
612 /* read the first word(s) */
613 cmd_flags = MCPR_NVM_COMMAND_FIRST;
614 while ((buf_size > sizeof(u32)) && (rc == 0)) {
615 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
616 memcpy(ret_buf, &val, 4);
617
618 /* advance to the next dword */
619 offset += sizeof(u32);
620 ret_buf += sizeof(u32);
621 buf_size -= sizeof(u32);
622 cmd_flags = 0;
623 }
624
625 if (rc == 0) {
626 cmd_flags |= MCPR_NVM_COMMAND_LAST;
627 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
628 memcpy(ret_buf, &val, 4);
629 }
630
631 /* disable access to nvram interface */
632 bnx2x_disable_nvram_access(bp);
633 bnx2x_release_nvram_lock(bp);
634
635 return rc;
636}
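/* Flag sequencing sketch (illustrative offsets): an 8-byte read at
 * offset 0x40 issues two dword commands, the first with
 * MCPR_NVM_COMMAND_FIRST at 0x40 and the second with
 * MCPR_NVM_COMMAND_LAST at 0x44, so the NVRAM controller sees one
 * framed burst.  Each dword comes back in CPU order and is stored
 * big-endian, which lets the caller treat the buffer as a plain byte
 * array.
 */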
637
638static int bnx2x_get_eeprom(struct net_device *dev,
639 struct ethtool_eeprom *eeprom, u8 *eebuf)
640{
641 struct bnx2x *bp = netdev_priv(dev);
642 int rc;
643
644 if (!netif_running(dev))
645 return -EAGAIN;
646
647 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
648 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
649 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
650 eeprom->len, eeprom->len);
651
652 /* parameters already validated in ethtool_get_eeprom */
653
654 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
655
656 return rc;
657}
658
659static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
660 u32 cmd_flags)
661{
662 int count, i, rc;
663
664 /* build the command word */
665 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
666
667 /* need to clear DONE bit separately */
668 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
669
670 /* write the data */
671 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
672
673 /* address of the NVRAM to write to */
674 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
675 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
676
677 /* issue the write command */
678 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
679
680 /* adjust timeout for emulation/FPGA */
681 count = NVRAM_TIMEOUT_COUNT;
682 if (CHIP_REV_IS_SLOW(bp))
683 count *= 100;
684
685 /* wait for completion */
686 rc = -EBUSY;
687 for (i = 0; i < count; i++) {
688 udelay(5);
689 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
690 if (val & MCPR_NVM_COMMAND_DONE) {
691 rc = 0;
692 break;
693 }
694 }
695
696 return rc;
697}
698
699#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
700
701static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
702 int buf_size)
703{
704 int rc;
705 u32 cmd_flags;
706 u32 align_offset;
707 __be32 val;
708
709 if (offset + buf_size > bp->common.flash_size) {
710 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
711 " buf_size (0x%x) > flash_size (0x%x)\n",
712 offset, buf_size, bp->common.flash_size);
713 return -EINVAL;
714 }
715
716 /* request access to nvram interface */
717 rc = bnx2x_acquire_nvram_lock(bp);
718 if (rc)
719 return rc;
720
721 /* enable access to nvram interface */
722 bnx2x_enable_nvram_access(bp);
723
724 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
725 align_offset = (offset & ~0x03);
726 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
727
728 if (rc == 0) {
729 val &= ~(0xff << BYTE_OFFSET(offset));
730 val |= (*data_buf << BYTE_OFFSET(offset));
731
732		/* nvram data is returned as an array of bytes;
733		 * convert it back to cpu order */
734 val = be32_to_cpu(val);
735
736 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
737 cmd_flags);
738 }
739
740 /* disable access to nvram interface */
741 bnx2x_disable_nvram_access(bp);
742 bnx2x_release_nvram_lock(bp);
743
744 return rc;
745}
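
/* Worked example (illustrative only): bnx2x_nvram_write1() is a dword
 * read-modify-write around a single byte.  For a hypothetical one-byte write
 * at offset 0x1002:
 *
 *	align_offset        = 0x1002 & ~0x03      = 0x1000
 *	BYTE_OFFSET(0x1002) = 8 * (0x1002 & 0x03) = 16
 *
 * so the dword at 0x1000 is read, bits 23:16 of that value are replaced with
 * the new byte, and the dword is written back with FIRST | LAST set so the
 * whole operation is a single one-dword burst.
 */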
746
747static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
748 int buf_size)
749{
750 int rc;
751 u32 cmd_flags;
752 u32 val;
753 u32 written_so_far;
754
755 if (buf_size == 1) /* ethtool */
756 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
757
758 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
759 DP(BNX2X_MSG_NVM,
760 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
761 offset, buf_size);
762 return -EINVAL;
763 }
764
765 if (offset + buf_size > bp->common.flash_size) {
766 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
767 " buf_size (0x%x) > flash_size (0x%x)\n",
768 offset, buf_size, bp->common.flash_size);
769 return -EINVAL;
770 }
771
772 /* request access to nvram interface */
773 rc = bnx2x_acquire_nvram_lock(bp);
774 if (rc)
775 return rc;
776
777 /* enable access to nvram interface */
778 bnx2x_enable_nvram_access(bp);
779
780 written_so_far = 0;
781 cmd_flags = MCPR_NVM_COMMAND_FIRST;
782 while ((written_so_far < buf_size) && (rc == 0)) {
783 if (written_so_far == (buf_size - sizeof(u32)))
784 cmd_flags |= MCPR_NVM_COMMAND_LAST;
785 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
786 cmd_flags |= MCPR_NVM_COMMAND_LAST;
787 else if ((offset % NVRAM_PAGE_SIZE) == 0)
788 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
789
790 memcpy(&val, data_buf, 4);
791
792 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
793
794 /* advance to the next dword */
795 offset += sizeof(u32);
796 data_buf += sizeof(u32);
797 written_so_far += sizeof(u32);
798 cmd_flags = 0;
799 }
800
801 /* disable access to nvram interface */
802 bnx2x_disable_nvram_access(bp);
803 bnx2x_release_nvram_lock(bp);
804
805 return rc;
806}
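
/* Worked example (illustrative only): the flag logic above splits a buffer
 * into one burst per NVRAM page.  For a hypothetical 16-byte write that
 * starts 8 bytes before a page boundary, the per-dword flags come out as:
 *
 *	dword 0: FIRST         (start of the buffer)
 *	dword 1: LAST          (offset + 4 lands on the page boundary)
 *	dword 2: FIRST         (offset is now page aligned)
 *	dword 3: LAST          (last dword of the buffer)
 */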
807
808static int bnx2x_set_eeprom(struct net_device *dev,
809 struct ethtool_eeprom *eeprom, u8 *eebuf)
810{
811 struct bnx2x *bp = netdev_priv(dev);
812 int port = BP_PORT(bp);
813 int rc = 0;
814
815 if (!netif_running(dev))
816 return -EAGAIN;
817
818 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
819 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
820 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
821 eeprom->len, eeprom->len);
822
823 /* parameters already validated in ethtool_set_eeprom */
824
825 /* PHY eeprom can be accessed only by the PMF */
826 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
827 !bp->port.pmf)
828 return -EINVAL;
829
830 if (eeprom->magic == 0x50485950) {
831 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
832 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
833
834 bnx2x_acquire_phy_lock(bp);
835 rc |= bnx2x_link_reset(&bp->link_params,
836 &bp->link_vars, 0);
837 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
838 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
839 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
840 MISC_REGISTERS_GPIO_HIGH, port);
841 bnx2x_release_phy_lock(bp);
842 bnx2x_link_report(bp);
843
844 } else if (eeprom->magic == 0x50485952) {
845 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
846 if (bp->state == BNX2X_STATE_OPEN) {
847 bnx2x_acquire_phy_lock(bp);
848 rc |= bnx2x_link_reset(&bp->link_params,
849 &bp->link_vars, 1);
850
851 rc |= bnx2x_phy_init(&bp->link_params,
852 &bp->link_vars);
853 bnx2x_release_phy_lock(bp);
854 bnx2x_calc_fc_adv(bp);
855 }
856 } else if (eeprom->magic == 0x53985943) {
857 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
858 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
859 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
860 u8 ext_phy_addr =
861 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
862
863 /* DSP Remove Download Mode */
864 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
865 MISC_REGISTERS_GPIO_LOW, port);
866
867 bnx2x_acquire_phy_lock(bp);
868
869 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
870
871 /* wait 0.5 sec to allow it to run */
872 msleep(500);
873 bnx2x_ext_phy_hw_reset(bp, port);
874 msleep(500);
875 bnx2x_release_phy_lock(bp);
876 }
877 } else
878 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
879
880 return rc;
881}
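
/* Worked example (illustrative only): the eeprom->magic values above are
 * ASCII tags packed into a u32, which is why the PMF check accepts the whole
 * 0x50485900..0x504859FF ("PHY" plus any final byte) range:
 *
 *	('P' << 24) | ('H' << 16) | ('Y' << 8) | 'P'
 *	  = (0x50 << 24) | (0x48 << 16) | (0x59 << 8) | 0x50 = 0x50485950 "PHYP"
 *	('P' << 24) | ('H' << 16) | ('Y' << 8) | 'R'         = 0x50485952 "PHYR"
 */
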
882static int bnx2x_get_coalesce(struct net_device *dev,
883 struct ethtool_coalesce *coal)
884{
885 struct bnx2x *bp = netdev_priv(dev);
886
887 memset(coal, 0, sizeof(struct ethtool_coalesce));
888
889 coal->rx_coalesce_usecs = bp->rx_ticks;
890 coal->tx_coalesce_usecs = bp->tx_ticks;
891
892 return 0;
893}
894
895static int bnx2x_set_coalesce(struct net_device *dev,
896 struct ethtool_coalesce *coal)
897{
898 struct bnx2x *bp = netdev_priv(dev);
899
900 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
901 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
902 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
903
904 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
905 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
906 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
907
908 if (netif_running(dev))
909 bnx2x_update_coalesce(bp);
910
911 return 0;
912}
913
914static void bnx2x_get_ringparam(struct net_device *dev,
915 struct ethtool_ringparam *ering)
916{
917 struct bnx2x *bp = netdev_priv(dev);
918
919 ering->rx_max_pending = MAX_RX_AVAIL;
920 ering->rx_mini_max_pending = 0;
921 ering->rx_jumbo_max_pending = 0;
922
923 ering->rx_pending = bp->rx_ring_size;
924 ering->rx_mini_pending = 0;
925 ering->rx_jumbo_pending = 0;
926
927 ering->tx_max_pending = MAX_TX_AVAIL;
928 ering->tx_pending = bp->tx_ring_size;
929}
930
931static int bnx2x_set_ringparam(struct net_device *dev,
932 struct ethtool_ringparam *ering)
933{
934 struct bnx2x *bp = netdev_priv(dev);
935 int rc = 0;
936
937 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
938 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
939 return -EAGAIN;
940 }
941
942 if ((ering->rx_pending > MAX_RX_AVAIL) ||
943 (ering->tx_pending > MAX_TX_AVAIL) ||
944 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
945 return -EINVAL;
946
947 bp->rx_ring_size = ering->rx_pending;
948 bp->tx_ring_size = ering->tx_pending;
949
950 if (netif_running(dev)) {
951 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
952 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
953 }
954
955 return rc;
956}
957
958static void bnx2x_get_pauseparam(struct net_device *dev,
959 struct ethtool_pauseparam *epause)
960{
961 struct bnx2x *bp = netdev_priv(dev);
962
963 epause->autoneg = (bp->link_params.req_flow_ctrl ==
964 BNX2X_FLOW_CTRL_AUTO) &&
965 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
966
967 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
968 BNX2X_FLOW_CTRL_RX);
969 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
970 BNX2X_FLOW_CTRL_TX);
971
972 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
973 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
974 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
975}
976
977static int bnx2x_set_pauseparam(struct net_device *dev,
978 struct ethtool_pauseparam *epause)
979{
980 struct bnx2x *bp = netdev_priv(dev);
981
982 if (IS_E1HMF(bp))
983 return 0;
984
985 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
986 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
987 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
988
989 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
990
991 if (epause->rx_pause)
992 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
993
994 if (epause->tx_pause)
995 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
996
997 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
998 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
999
1000 if (epause->autoneg) {
1001 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
1002 DP(NETIF_MSG_LINK, "autoneg not supported\n");
1003 return -EINVAL;
1004 }
1005
1006 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
1007 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
1008 }
1009
1010 DP(NETIF_MSG_LINK,
1011 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
1012
1013 if (netif_running(dev)) {
1014 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1015 bnx2x_link_set(bp);
1016 }
1017
1018 return 0;
1019}
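
/* Illustrative summary, assuming the RX/TX pause bits are distinct from the
 * AUTO value: the request above reduces to three outcomes:
 *
 *	rx_pause and/or tx_pause set      -> req_flow_ctrl = RX and/or TX
 *	neither set                       -> req_flow_ctrl = NONE
 *	autoneg requested and speed AUTO  -> req_flow_ctrl = AUTO (overrides)
 */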
1020
1021static int bnx2x_set_flags(struct net_device *dev, u32 data)
1022{
1023 struct bnx2x *bp = netdev_priv(dev);
1024 int changed = 0;
1025 int rc = 0;
1026
1027 if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
1028 return -EINVAL;
1029
1030 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1031 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
1032 return -EAGAIN;
1033 }
1034
1035 /* TPA requires Rx CSUM offloading */
1036 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
1037 if (!bp->disable_tpa) {
1038 if (!(dev->features & NETIF_F_LRO)) {
1039 dev->features |= NETIF_F_LRO;
1040 bp->flags |= TPA_ENABLE_FLAG;
1041 changed = 1;
1042 }
1043 } else
1044 rc = -EINVAL;
1045 } else if (dev->features & NETIF_F_LRO) {
1046 dev->features &= ~NETIF_F_LRO;
1047 bp->flags &= ~TPA_ENABLE_FLAG;
1048 changed = 1;
1049 }
1050
1051 if (data & ETH_FLAG_RXHASH)
1052 dev->features |= NETIF_F_RXHASH;
1053 else
1054 dev->features &= ~NETIF_F_RXHASH;
1055
1056 if (changed && netif_running(dev)) {
1057 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
1058 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
1059 }
1060
1061 return rc;
1062}
1063
1064static u32 bnx2x_get_rx_csum(struct net_device *dev)
1065{
1066 struct bnx2x *bp = netdev_priv(dev);
1067
1068 return bp->rx_csum;
1069}
1070
1071static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
1072{
1073 struct bnx2x *bp = netdev_priv(dev);
1074 int rc = 0;
1075
1076 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1077 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
1078 return -EAGAIN;
1079 }
1080
1081 bp->rx_csum = data;
1082
1083	/* Disable TPA when Rx CSUM is disabled. Otherwise all
1084	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
1085 if (!data) {
1086 u32 flags = ethtool_op_get_flags(dev);
1087
1088 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
1089 }
1090
1091 return rc;
1092}
1093
1094static int bnx2x_set_tso(struct net_device *dev, u32 data)
1095{
1096 if (data) {
1097 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
1098 dev->features |= NETIF_F_TSO6;
1099 } else {
1100 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
1101 dev->features &= ~NETIF_F_TSO6;
1102 }
1103
1104 return 0;
1105}
1106
1107static const struct {
1108 char string[ETH_GSTRING_LEN];
1109} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
1110 { "register_test (offline)" },
1111 { "memory_test (offline)" },
1112 { "loopback_test (offline)" },
1113 { "nvram_test (online)" },
1114 { "interrupt_test (online)" },
1115 { "link_test (online)" },
1116 { "idle check (online)" }
1117};
1118
1119static int bnx2x_test_registers(struct bnx2x *bp)
1120{
1121 int idx, i, rc = -ENODEV;
1122 u32 wr_val = 0;
1123 int port = BP_PORT(bp);
1124 static const struct {
1125 u32 offset0;
1126 u32 offset1;
1127 u32 mask;
1128 } reg_tbl[] = {
1129/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
1130 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
1131 { HC_REG_AGG_INT_0, 4, 0x000003ff },
1132 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
1133 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
1134 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
1135 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
1136 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
1137 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
1138 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
1139/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
1140 { QM_REG_CONNNUM_0, 4, 0x000fffff },
1141 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
1142 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
1143 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
1144 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
1145 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
1146 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
1147 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
1148 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
1149/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
1150 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
1151 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
1152 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
1153 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
1154 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
1155 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
1156 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
1157 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
1158 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
1159/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
1160 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
1161 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
1162 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
1163 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
1164 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
1165 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
1166
1167 { 0xffffffff, 0, 0x00000000 }
1168 };
1169
1170 if (!netif_running(bp->dev))
1171 return rc;
1172
1173 /* Repeat the test twice:
1174 First by writing 0x00000000, second by writing 0xffffffff */
1175 for (idx = 0; idx < 2; idx++) {
1176
1177 switch (idx) {
1178 case 0:
1179 wr_val = 0;
1180 break;
1181 case 1:
1182 wr_val = 0xffffffff;
1183 break;
1184 }
1185
1186 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
1187 u32 offset, mask, save_val, val;
1188
1189 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
1190 mask = reg_tbl[i].mask;
1191
1192 save_val = REG_RD(bp, offset);
1193
1194 REG_WR(bp, offset, (wr_val & mask));
1195 val = REG_RD(bp, offset);
1196
1197 /* Restore the original register's value */
1198 REG_WR(bp, offset, save_val);
1199
1200 /* verify value is as expected */
1201 if ((val & mask) != (wr_val & mask)) {
1202 DP(NETIF_MSG_PROBE,
1203 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
1204 offset, val, wr_val, mask);
1205 goto test_reg_exit;
1206 }
1207 }
1208 }
1209
1210 rc = 0;
1211
1212test_reg_exit:
1213 return rc;
1214}
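
/* Worked example (illustrative only): reg_tbl[] stores a port-0 address plus
 * a per-port stride, so the register actually exercised is
 *
 *	offset = offset0 + port * offset1;
 *
 * e.g. for port 1, BRB1_REG_PAUSE_LOW_THRESHOLD_0 (stride 4) is tested at
 * offset0 + 4 and NIG_REG_LLH0_DEST_MAC_0_0 (stride 160) at offset0 + 0xa0.
 * Only the bits covered by 'mask' are compared, since bits outside the mask
 * are not expected to read back exactly what was written.
 */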
1215
1216static int bnx2x_test_memory(struct bnx2x *bp)
1217{
1218 int i, j, rc = -ENODEV;
1219 u32 val;
1220 static const struct {
1221 u32 offset;
1222 int size;
1223 } mem_tbl[] = {
1224 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
1225 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
1226 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
1227 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
1228 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
1229 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
1230 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
1231
1232 { 0xffffffff, 0 }
1233 };
1234 static const struct {
1235 char *name;
1236 u32 offset;
1237 u32 e1_mask;
1238 u32 e1h_mask;
1239 } prty_tbl[] = {
1240 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
1241 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
1242 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
1243 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
1244 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
1245 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
1246
1247 { NULL, 0xffffffff, 0, 0 }
1248 };
1249
1250 if (!netif_running(bp->dev))
1251 return rc;
1252
1253 /* Go through all the memories */
1254 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
1255 for (j = 0; j < mem_tbl[i].size; j++)
1256 REG_RD(bp, mem_tbl[i].offset + j*4);
1257
1258 /* Check the parity status */
1259 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
1260 val = REG_RD(bp, prty_tbl[i].offset);
1261 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
1262 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
1263 DP(NETIF_MSG_HW,
1264 "%s is 0x%x\n", prty_tbl[i].name, val);
1265 goto test_mem_exit;
1266 }
1267 }
1268
1269 rc = 0;
1270
1271test_mem_exit:
1272 return rc;
1273}
1274
1275static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
1276{
1277 int cnt = 1000;
1278
1279 if (link_up)
1280 while (bnx2x_link_test(bp) && cnt--)
1281 msleep(10);
1282}
1283
1284static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1285{
1286 unsigned int pkt_size, num_pkts, i;
1287 struct sk_buff *skb;
1288 unsigned char *packet;
1289 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
1290 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
1291 u16 tx_start_idx, tx_idx;
1292 u16 rx_start_idx, rx_idx;
1293 u16 pkt_prod, bd_prod;
1294 struct sw_tx_bd *tx_buf;
1295 struct eth_tx_start_bd *tx_start_bd;
1296 struct eth_tx_parse_bd *pbd = NULL;
1297 dma_addr_t mapping;
1298 union eth_rx_cqe *cqe;
1299 u8 cqe_fp_flags;
1300 struct sw_rx_bd *rx_buf;
1301 u16 len;
1302 int rc = -ENODEV;
1303
1304 /* check the loopback mode */
1305 switch (loopback_mode) {
1306 case BNX2X_PHY_LOOPBACK:
1307 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
1308 return -EINVAL;
1309 break;
1310 case BNX2X_MAC_LOOPBACK:
1311 bp->link_params.loopback_mode = LOOPBACK_BMAC;
1312 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1313 break;
1314 default:
1315 return -EINVAL;
1316 }
1317
1318 /* prepare the loopback packet */
1319 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
1320 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
1321 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1322 if (!skb) {
1323 rc = -ENOMEM;
1324 goto test_loopback_exit;
1325 }
1326 packet = skb_put(skb, pkt_size);
1327 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
1328 memset(packet + ETH_ALEN, 0, ETH_ALEN);
1329 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
1330 for (i = ETH_HLEN; i < pkt_size; i++)
1331 packet[i] = (unsigned char) (i & 0xff);
1332
1333 /* send the loopback packet */
1334 num_pkts = 0;
1335 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
1336 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
1337
1338 pkt_prod = fp_tx->tx_pkt_prod++;
1339 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
1340 tx_buf->first_bd = fp_tx->tx_bd_prod;
1341 tx_buf->skb = skb;
1342 tx_buf->flags = 0;
1343
1344 bd_prod = TX_BD(fp_tx->tx_bd_prod);
1345 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
1346 mapping = dma_map_single(&bp->pdev->dev, skb->data,
1347 skb_headlen(skb), DMA_TO_DEVICE);
1348 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1349 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1350 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
1351 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
1352 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
1353 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1354 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
1355 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
1356
1357 /* turn on parsing and get a BD */
1358 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1359 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
1360
1361 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
1362
1363 wmb();
1364
1365 fp_tx->tx_db.data.prod += 2;
1366 barrier();
1367 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
1368
1369 mmiowb();
1370
1371 num_pkts++;
1372 fp_tx->tx_bd_prod += 2; /* start + pbd */
1373
1374 udelay(100);
1375
1376 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
1377 if (tx_idx != tx_start_idx + num_pkts)
1378 goto test_loopback_exit;
1379
1380 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
1381 if (rx_idx != rx_start_idx + num_pkts)
1382 goto test_loopback_exit;
1383
1384 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
1385 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1386 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
1387 goto test_loopback_rx_exit;
1388
1389 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1390 if (len != pkt_size)
1391 goto test_loopback_rx_exit;
1392
1393 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
1394 skb = rx_buf->skb;
1395 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
1396 for (i = ETH_HLEN; i < pkt_size; i++)
1397 if (*(skb->data + i) != (unsigned char) (i & 0xff))
1398 goto test_loopback_rx_exit;
1399
1400 rc = 0;
1401
1402test_loopback_rx_exit:
1403
1404 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
1405 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
1406 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
1407 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
1408
1409 /* Update producers */
1410 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
1411 fp_rx->rx_sge_prod);
1412
1413test_loopback_exit:
1414 bp->link_params.loopback_mode = LOOPBACK_NONE;
1415
1416 return rc;
1417}
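
/* Illustrative sketch: the loopback frame built above is fully deterministic
 * (destination MAC = own address, zeroed source, 0x77 header fill, payload
 * byte i = i & 0xff), so the receive side can check it byte by byte.  A
 * stand-alone checker equivalent to the verification loop above; the function
 * name is hypothetical and not part of the driver.
 */
static int bnx2x_check_lb_payload_sketch(const u8 *data, unsigned int pkt_size)
{
	unsigned int i;

	/* every payload byte was written as (i & 0xff) on the transmit side */
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (data[i] != (u8)(i & 0xff))
			return -EIO;
	return 0;
}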
1418
1419static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
1420{
1421 int rc = 0, res;
1422
1423 if (BP_NOMCP(bp))
1424 return rc;
1425
1426 if (!netif_running(bp->dev))
1427 return BNX2X_LOOPBACK_FAILED;
1428
1429 bnx2x_netif_stop(bp, 1);
1430 bnx2x_acquire_phy_lock(bp);
1431
1432 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
1433 if (res) {
1434 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
1435 rc |= BNX2X_PHY_LOOPBACK_FAILED;
1436 }
1437
1438 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
1439 if (res) {
1440 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
1441 rc |= BNX2X_MAC_LOOPBACK_FAILED;
1442 }
1443
1444 bnx2x_release_phy_lock(bp);
1445 bnx2x_netif_start(bp);
1446
1447 return rc;
1448}
1449
1450#define CRC32_RESIDUAL 0xdebb20e3
1451
1452static int bnx2x_test_nvram(struct bnx2x *bp)
1453{
1454 static const struct {
1455 int offset;
1456 int size;
1457 } nvram_tbl[] = {
1458 { 0, 0x14 }, /* bootstrap */
1459 { 0x14, 0xec }, /* dir */
1460 { 0x100, 0x350 }, /* manuf_info */
1461 { 0x450, 0xf0 }, /* feature_info */
1462 { 0x640, 0x64 }, /* upgrade_key_info */
1463 { 0x6a4, 0x64 },
1464 { 0x708, 0x70 }, /* manuf_key_info */
1465 { 0x778, 0x70 },
1466 { 0, 0 }
1467 };
1468 __be32 buf[0x350 / 4];
1469 u8 *data = (u8 *)buf;
1470 int i, rc;
1471 u32 magic, crc;
1472
1473 if (BP_NOMCP(bp))
1474 return 0;
1475
1476 rc = bnx2x_nvram_read(bp, 0, data, 4);
1477 if (rc) {
1478 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
1479 goto test_nvram_exit;
1480 }
1481
1482 magic = be32_to_cpu(buf[0]);
1483 if (magic != 0x669955aa) {
1484 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
1485 rc = -ENODEV;
1486 goto test_nvram_exit;
1487 }
1488
1489 for (i = 0; nvram_tbl[i].size; i++) {
1490
1491 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
1492 nvram_tbl[i].size);
1493 if (rc) {
1494 DP(NETIF_MSG_PROBE,
1495 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
1496 goto test_nvram_exit;
1497 }
1498
1499 crc = ether_crc_le(nvram_tbl[i].size, data);
1500 if (crc != CRC32_RESIDUAL) {
1501 DP(NETIF_MSG_PROBE,
1502 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
1503 rc = -ENODEV;
1504 goto test_nvram_exit;
1505 }
1506 }
1507
1508test_nvram_exit:
1509 return rc;
1510}
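
/* Illustrative note: the CRC check above relies on a standard CRC32 property.
 * When a region carries its own CRC in its last bytes (which the NVRAM layout
 * is assumed to do here), recomputing the CRC over "data + stored CRC" always
 * yields a fixed residual, so
 *
 *	ether_crc_le(nvram_tbl[i].size, data) == CRC32_RESIDUAL
 *
 * validates each region without having to parse its contents.
 */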
1511
1512static int bnx2x_test_intr(struct bnx2x *bp)
1513{
1514 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
1515 int i, rc;
1516
1517 if (!netif_running(bp->dev))
1518 return -ENODEV;
1519
1520 config->hdr.length = 0;
1521 if (CHIP_IS_E1(bp))
1522 /* use last unicast entries */
1523 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
1524 else
1525 config->hdr.offset = BP_FUNC(bp);
1526 config->hdr.client_id = bp->fp->cl_id;
1527 config->hdr.reserved1 = 0;
1528
1529 bp->set_mac_pending++;
1530 smp_wmb();
1531 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
1532 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
1533 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
1534 if (rc == 0) {
1535 for (i = 0; i < 10; i++) {
1536 if (!bp->set_mac_pending)
1537 break;
1538 smp_rmb();
1539 msleep_interruptible(10);
1540 }
1541 if (i == 10)
1542 rc = -ENODEV;
1543 }
1544
1545 return rc;
1546}
1547
1548static void bnx2x_self_test(struct net_device *dev,
1549 struct ethtool_test *etest, u64 *buf)
1550{
1551 struct bnx2x *bp = netdev_priv(dev);
1552
1553 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1554 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
1555 etest->flags |= ETH_TEST_FL_FAILED;
1556 return;
1557 }
1558
1559 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
1560
1561 if (!netif_running(dev))
1562 return;
1563
1564 /* offline tests are not supported in MF mode */
1565 if (IS_E1HMF(bp))
1566 etest->flags &= ~ETH_TEST_FL_OFFLINE;
1567
1568 if (etest->flags & ETH_TEST_FL_OFFLINE) {
1569 int port = BP_PORT(bp);
1570 u32 val;
1571 u8 link_up;
1572
1573 /* save current value of input enable for TX port IF */
1574 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
1575 /* disable input for TX port IF */
1576 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
1577
1578 link_up = (bnx2x_link_test(bp) == 0);
1579 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
1580 bnx2x_nic_load(bp, LOAD_DIAG);
1581 /* wait until link state is restored */
1582 bnx2x_wait_for_link(bp, link_up);
1583
1584 if (bnx2x_test_registers(bp) != 0) {
1585 buf[0] = 1;
1586 etest->flags |= ETH_TEST_FL_FAILED;
1587 }
1588 if (bnx2x_test_memory(bp) != 0) {
1589 buf[1] = 1;
1590 etest->flags |= ETH_TEST_FL_FAILED;
1591 }
1592 buf[2] = bnx2x_test_loopback(bp, link_up);
1593 if (buf[2] != 0)
1594 etest->flags |= ETH_TEST_FL_FAILED;
1595
1596 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
1597
1598 /* restore input for TX port IF */
1599 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
1600
1601 bnx2x_nic_load(bp, LOAD_NORMAL);
1602 /* wait until link state is restored */
1603 bnx2x_wait_for_link(bp, link_up);
1604 }
1605 if (bnx2x_test_nvram(bp) != 0) {
1606 buf[3] = 1;
1607 etest->flags |= ETH_TEST_FL_FAILED;
1608 }
1609 if (bnx2x_test_intr(bp) != 0) {
1610 buf[4] = 1;
1611 etest->flags |= ETH_TEST_FL_FAILED;
1612 }
1613 if (bp->port.pmf)
1614 if (bnx2x_link_test(bp) != 0) {
1615 buf[5] = 1;
1616 etest->flags |= ETH_TEST_FL_FAILED;
1617 }
1618
1619#ifdef BNX2X_EXTRA_DEBUG
1620 bnx2x_panic_dump(bp);
1621#endif
1622}
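
/* Illustrative note: the buf[] slots filled above line up with
 * bnx2x_tests_str_arr[]:
 *
 *	buf[0] register_test (offline)	buf[3] nvram_test (online)
 *	buf[1] memory_test (offline)	buf[4] interrupt_test (online)
 *	buf[2] loopback_test (offline)	buf[5] link_test (online)
 *
 * A non-zero slot marks that named test as failed (e.g. in the output of
 * "ethtool -t <dev> offline"); buf[6], "idle check", is not set here.
 */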
1623
1624static const struct {
1625 long offset;
1626 int size;
1627 u8 string[ETH_GSTRING_LEN];
1628} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
1629/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
1630 { Q_STATS_OFFSET32(error_bytes_received_hi),
1631 8, "[%d]: rx_error_bytes" },
1632 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
1633 8, "[%d]: rx_ucast_packets" },
1634 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
1635 8, "[%d]: rx_mcast_packets" },
1636 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
1637 8, "[%d]: rx_bcast_packets" },
1638 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
1639 { Q_STATS_OFFSET32(rx_err_discard_pkt),
1640 4, "[%d]: rx_phy_ip_err_discards"},
1641 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
1642 4, "[%d]: rx_skb_alloc_discard" },
1643 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
1644
1645/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
1646 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
1647 8, "[%d]: tx_ucast_packets" },
1648 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
1649 8, "[%d]: tx_mcast_packets" },
1650 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
1651 8, "[%d]: tx_bcast_packets" }
1652};
1653
1654static const struct {
1655 long offset;
1656 int size;
1657 u32 flags;
1658#define STATS_FLAGS_PORT 1
1659#define STATS_FLAGS_FUNC 2
1660#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
1661 u8 string[ETH_GSTRING_LEN];
1662} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
1663/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
1664 8, STATS_FLAGS_BOTH, "rx_bytes" },
1665 { STATS_OFFSET32(error_bytes_received_hi),
1666 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
1667 { STATS_OFFSET32(total_unicast_packets_received_hi),
1668 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
1669 { STATS_OFFSET32(total_multicast_packets_received_hi),
1670 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
1671 { STATS_OFFSET32(total_broadcast_packets_received_hi),
1672 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
1673 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
1674 8, STATS_FLAGS_PORT, "rx_crc_errors" },
1675 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
1676 8, STATS_FLAGS_PORT, "rx_align_errors" },
1677 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
1678 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
1679 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
1680 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
1681/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
1682 8, STATS_FLAGS_PORT, "rx_fragments" },
1683 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
1684 8, STATS_FLAGS_PORT, "rx_jabbers" },
1685 { STATS_OFFSET32(no_buff_discard_hi),
1686 8, STATS_FLAGS_BOTH, "rx_discards" },
1687 { STATS_OFFSET32(mac_filter_discard),
1688 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
1689 { STATS_OFFSET32(xxoverflow_discard),
1690 4, STATS_FLAGS_PORT, "rx_fw_discards" },
1691 { STATS_OFFSET32(brb_drop_hi),
1692 8, STATS_FLAGS_PORT, "rx_brb_discard" },
1693 { STATS_OFFSET32(brb_truncate_hi),
1694 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
1695 { STATS_OFFSET32(pause_frames_received_hi),
1696 8, STATS_FLAGS_PORT, "rx_pause_frames" },
1697 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
1698 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
1699 { STATS_OFFSET32(nig_timer_max),
1700 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
1701/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
1702 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
1703 { STATS_OFFSET32(rx_skb_alloc_failed),
1704 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
1705 { STATS_OFFSET32(hw_csum_err),
1706 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
1707
1708 { STATS_OFFSET32(total_bytes_transmitted_hi),
1709 8, STATS_FLAGS_BOTH, "tx_bytes" },
1710 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
1711 8, STATS_FLAGS_PORT, "tx_error_bytes" },
1712 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
1713 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
1714 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
1715 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
1716 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
1717 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
1718 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
1719 8, STATS_FLAGS_PORT, "tx_mac_errors" },
1720 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
1721 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
1722/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
1723 8, STATS_FLAGS_PORT, "tx_single_collisions" },
1724 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
1725 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
1726 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
1727 8, STATS_FLAGS_PORT, "tx_deferred" },
1728 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
1729 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
1730 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
1731 8, STATS_FLAGS_PORT, "tx_late_collisions" },
1732 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
1733 8, STATS_FLAGS_PORT, "tx_total_collisions" },
1734 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
1735 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
1736 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
1737 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
1738 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
1739 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
1740 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
1741 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
1742/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
1743 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
1744 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
1745 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
1746 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
1747 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
1748 { STATS_OFFSET32(pause_frames_sent_hi),
1749 8, STATS_FLAGS_PORT, "tx_pause_frames" }
1750};
1751
1752#define IS_PORT_STAT(i) \
1753 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
1754#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
1755#define IS_E1HMF_MODE_STAT(bp) \
1756 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
1757
1758static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
1759{
1760 struct bnx2x *bp = netdev_priv(dev);
1761 int i, num_stats;
1762
1763 switch (stringset) {
1764 case ETH_SS_STATS:
1765 if (is_multi(bp)) {
1766 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
1767 if (!IS_E1HMF_MODE_STAT(bp))
1768 num_stats += BNX2X_NUM_STATS;
1769 } else {
1770 if (IS_E1HMF_MODE_STAT(bp)) {
1771 num_stats = 0;
1772 for (i = 0; i < BNX2X_NUM_STATS; i++)
1773 if (IS_FUNC_STAT(i))
1774 num_stats++;
1775 } else
1776 num_stats = BNX2X_NUM_STATS;
1777 }
1778 return num_stats;
1779
1780 case ETH_SS_TEST:
1781 return BNX2X_NUM_TESTS;
1782
1783 default:
1784 return -EINVAL;
1785 }
1786}
1787
1788static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
1789{
1790 struct bnx2x *bp = netdev_priv(dev);
1791 int i, j, k;
1792
1793 switch (stringset) {
1794 case ETH_SS_STATS:
1795 if (is_multi(bp)) {
1796 k = 0;
1797 for_each_queue(bp, i) {
1798 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
1799 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
1800 bnx2x_q_stats_arr[j].string, i);
1801 k += BNX2X_NUM_Q_STATS;
1802 }
1803 if (IS_E1HMF_MODE_STAT(bp))
1804 break;
1805 for (j = 0; j < BNX2X_NUM_STATS; j++)
1806 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
1807 bnx2x_stats_arr[j].string);
1808 } else {
1809 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
1810 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
1811 continue;
1812 strcpy(buf + j*ETH_GSTRING_LEN,
1813 bnx2x_stats_arr[i].string);
1814 j++;
1815 }
1816 }
1817 break;
1818
1819 case ETH_SS_TEST:
1820 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
1821 break;
1822 }
1823}
1824
1825static void bnx2x_get_ethtool_stats(struct net_device *dev,
1826 struct ethtool_stats *stats, u64 *buf)
1827{
1828 struct bnx2x *bp = netdev_priv(dev);
1829 u32 *hw_stats, *offset;
1830 int i, j, k;
1831
1832 if (is_multi(bp)) {
1833 k = 0;
1834 for_each_queue(bp, i) {
1835 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
1836 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
1837 if (bnx2x_q_stats_arr[j].size == 0) {
1838 /* skip this counter */
1839 buf[k + j] = 0;
1840 continue;
1841 }
1842 offset = (hw_stats +
1843 bnx2x_q_stats_arr[j].offset);
1844 if (bnx2x_q_stats_arr[j].size == 4) {
1845 /* 4-byte counter */
1846 buf[k + j] = (u64) *offset;
1847 continue;
1848 }
1849 /* 8-byte counter */
1850 buf[k + j] = HILO_U64(*offset, *(offset + 1));
1851 }
1852 k += BNX2X_NUM_Q_STATS;
1853 }
1854 if (IS_E1HMF_MODE_STAT(bp))
1855 return;
1856 hw_stats = (u32 *)&bp->eth_stats;
1857 for (j = 0; j < BNX2X_NUM_STATS; j++) {
1858 if (bnx2x_stats_arr[j].size == 0) {
1859 /* skip this counter */
1860 buf[k + j] = 0;
1861 continue;
1862 }
1863 offset = (hw_stats + bnx2x_stats_arr[j].offset);
1864 if (bnx2x_stats_arr[j].size == 4) {
1865 /* 4-byte counter */
1866 buf[k + j] = (u64) *offset;
1867 continue;
1868 }
1869 /* 8-byte counter */
1870 buf[k + j] = HILO_U64(*offset, *(offset + 1));
1871 }
1872 } else {
1873 hw_stats = (u32 *)&bp->eth_stats;
1874 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
1875 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
1876 continue;
1877 if (bnx2x_stats_arr[i].size == 0) {
1878 /* skip this counter */
1879 buf[j] = 0;
1880 j++;
1881 continue;
1882 }
1883 offset = (hw_stats + bnx2x_stats_arr[i].offset);
1884 if (bnx2x_stats_arr[i].size == 4) {
1885 /* 4-byte counter */
1886 buf[j] = (u64) *offset;
1887 j++;
1888 continue;
1889 }
1890 /* 8-byte counter */
1891 buf[j] = HILO_U64(*offset, *(offset + 1));
1892 j++;
1893 }
1894 }
1895}
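
/* Illustrative sketch: the 8-byte counters in the tables above are stored as
 * two consecutive u32s, high word first (the *_hi offsets), so HILO_U64() is
 * assumed to reduce to the composition below; the helper name is hypothetical.
 */
static inline u64 bnx2x_stat64_sketch(const u32 *hi_word)
{
	/* hi_word[0] = upper 32 bits, hi_word[1] = lower 32 bits */
	return ((u64)hi_word[0] << 32) | hi_word[1];
}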
1896
1897static int bnx2x_phys_id(struct net_device *dev, u32 data)
1898{
1899 struct bnx2x *bp = netdev_priv(dev);
1900 int i;
1901
1902 if (!netif_running(dev))
1903 return 0;
1904
1905 if (!bp->port.pmf)
1906 return 0;
1907
1908 if (data == 0)
1909 data = 2;
1910
1911 for (i = 0; i < (data * 2); i++) {
1912 if ((i % 2) == 0)
1913 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
1914 SPEED_1000);
1915 else
1916 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
1917
1918 msleep_interruptible(500);
1919 if (signal_pending(current))
1920 break;
1921 }
1922
1923 if (bp->link_vars.link_up)
1924 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
1925 bp->link_vars.line_speed);
1926
1927 return 0;
1928}
1929
1930static const struct ethtool_ops bnx2x_ethtool_ops = {
1931 .get_settings = bnx2x_get_settings,
1932 .set_settings = bnx2x_set_settings,
1933 .get_drvinfo = bnx2x_get_drvinfo,
1934 .get_regs_len = bnx2x_get_regs_len,
1935 .get_regs = bnx2x_get_regs,
1936 .get_wol = bnx2x_get_wol,
1937 .set_wol = bnx2x_set_wol,
1938 .get_msglevel = bnx2x_get_msglevel,
1939 .set_msglevel = bnx2x_set_msglevel,
1940 .nway_reset = bnx2x_nway_reset,
1941 .get_link = bnx2x_get_link,
1942 .get_eeprom_len = bnx2x_get_eeprom_len,
1943 .get_eeprom = bnx2x_get_eeprom,
1944 .set_eeprom = bnx2x_set_eeprom,
1945 .get_coalesce = bnx2x_get_coalesce,
1946 .set_coalesce = bnx2x_set_coalesce,
1947 .get_ringparam = bnx2x_get_ringparam,
1948 .set_ringparam = bnx2x_set_ringparam,
1949 .get_pauseparam = bnx2x_get_pauseparam,
1950 .set_pauseparam = bnx2x_set_pauseparam,
1951 .get_rx_csum = bnx2x_get_rx_csum,
1952 .set_rx_csum = bnx2x_set_rx_csum,
1953 .get_tx_csum = ethtool_op_get_tx_csum,
1954 .set_tx_csum = ethtool_op_set_tx_hw_csum,
1955 .set_flags = bnx2x_set_flags,
1956 .get_flags = ethtool_op_get_flags,
1957 .get_sg = ethtool_op_get_sg,
1958 .set_sg = ethtool_op_set_sg,
1959 .get_tso = ethtool_op_get_tso,
1960 .set_tso = bnx2x_set_tso,
1961 .self_test = bnx2x_self_test,
1962 .get_sset_count = bnx2x_get_sset_count,
1963 .get_strings = bnx2x_get_strings,
1964 .phys_id = bnx2x_phys_id,
1965 .get_ethtool_stats = bnx2x_get_ethtool_stats,
1966};
1967
1968void bnx2x_set_ethtool_ops(struct net_device *netdev)
1969{
1970 SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
1971}
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
new file mode 100644
index 00000000000..08d71bf438d
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_fw_defs.h
@@ -0,0 +1,594 @@
1/* bnx2x_fw_defs.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9
10
11#define CSTORM_ASSERT_LIST_INDEX_OFFSET \
12 (IS_E1H_OFFSET ? 0x7000 : 0x1000)
13#define CSTORM_ASSERT_LIST_OFFSET(idx) \
14 (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
15#define CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(function, index) \
16 (IS_E1H_OFFSET ? (0x8622 + ((function>>1) * 0x40) + \
17 ((function&1) * 0x100) + (index * 0x4)) : (0x3562 + (function * \
18 0x40) + (index * 0x4)))
19#define CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(function, index) \
20 (IS_E1H_OFFSET ? (0x8822 + ((function>>1) * 0x80) + \
21 ((function&1) * 0x200) + (index * 0x4)) : (0x35e2 + (function * \
22 0x80) + (index * 0x4)))
23#define CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(function) \
24 (IS_E1H_OFFSET ? (0x8600 + ((function>>1) * 0x40) + \
25 ((function&1) * 0x100)) : (0x3540 + (function * 0x40)))
26#define CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(function) \
27 (IS_E1H_OFFSET ? (0x8800 + ((function>>1) * 0x80) + \
28 ((function&1) * 0x200)) : (0x35c0 + (function * 0x80)))
29#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(function) \
30 (IS_E1H_OFFSET ? (0x8608 + ((function>>1) * 0x40) + \
31 ((function&1) * 0x100)) : (0x3548 + (function * 0x40)))
32#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(function) \
33 (IS_E1H_OFFSET ? (0x8808 + ((function>>1) * 0x80) + \
34 ((function&1) * 0x200)) : (0x35c8 + (function * 0x80)))
35#define CSTORM_FUNCTION_MODE_OFFSET \
36 (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff)
37#define CSTORM_HC_BTR_C_OFFSET(port) \
38 (IS_E1H_OFFSET ? (0x8c04 + (port * 0xf0)) : (0x36c4 + (port * 0xc0)))
39#define CSTORM_HC_BTR_U_OFFSET(port) \
40 (IS_E1H_OFFSET ? (0x8de4 + (port * 0xf0)) : (0x3844 + (port * 0xc0)))
41#define CSTORM_ISCSI_CQ_SIZE_OFFSET(function) \
42 (IS_E1H_OFFSET ? (0x6680 + (function * 0x8)) : (0x25a0 + \
43 (function * 0x8)))
44#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \
45 (IS_E1H_OFFSET ? (0x66c0 + (function * 0x8)) : (0x25b0 + \
46 (function * 0x8)))
47#define CSTORM_ISCSI_EQ_CONS_OFFSET(function, eqIdx) \
48 (IS_E1H_OFFSET ? (0x6040 + (function * 0xc0) + (eqIdx * 0x18)) : \
49 (0x2410 + (function * 0xc0) + (eqIdx * 0x18)))
50#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(function, eqIdx) \
51 (IS_E1H_OFFSET ? (0x6044 + (function * 0xc0) + (eqIdx * 0x18)) : \
52 (0x2414 + (function * 0xc0) + (eqIdx * 0x18)))
53#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(function, eqIdx) \
54 (IS_E1H_OFFSET ? (0x604c + (function * 0xc0) + (eqIdx * 0x18)) : \
55 (0x241c + (function * 0xc0) + (eqIdx * 0x18)))
56#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(function, eqIdx) \
57 (IS_E1H_OFFSET ? (0x6057 + (function * 0xc0) + (eqIdx * 0x18)) : \
58 (0x2427 + (function * 0xc0) + (eqIdx * 0x18)))
59#define CSTORM_ISCSI_EQ_PROD_OFFSET(function, eqIdx) \
60 (IS_E1H_OFFSET ? (0x6042 + (function * 0xc0) + (eqIdx * 0x18)) : \
61 (0x2412 + (function * 0xc0) + (eqIdx * 0x18)))
62#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(function, eqIdx) \
63 (IS_E1H_OFFSET ? (0x6056 + (function * 0xc0) + (eqIdx * 0x18)) : \
64 (0x2426 + (function * 0xc0) + (eqIdx * 0x18)))
65#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(function, eqIdx) \
66 (IS_E1H_OFFSET ? (0x6054 + (function * 0xc0) + (eqIdx * 0x18)) : \
67 (0x2424 + (function * 0xc0) + (eqIdx * 0x18)))
68#define CSTORM_ISCSI_HQ_SIZE_OFFSET(function) \
69 (IS_E1H_OFFSET ? (0x6640 + (function * 0x8)) : (0x2590 + \
70 (function * 0x8)))
71#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
72 (IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x2404 + \
73 (function * 0x8)))
74#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
75 (IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x2402 + \
76 (function * 0x8)))
77#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
78 (IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x2400 + \
79 (function * 0x8)))
80#define CSTORM_SB_HC_DISABLE_C_OFFSET(port, cpu_id, index) \
81 (IS_E1H_OFFSET ? (0x811a + (port * 0x280) + (cpu_id * 0x28) + \
82 (index * 0x4)) : (0x305a + (port * 0x280) + (cpu_id * 0x28) + \
83 (index * 0x4)))
84#define CSTORM_SB_HC_DISABLE_U_OFFSET(port, cpu_id, index) \
85 (IS_E1H_OFFSET ? (0xb01a + (port * 0x800) + (cpu_id * 0x80) + \
86 (index * 0x4)) : (0x401a + (port * 0x800) + (cpu_id * 0x80) + \
87 (index * 0x4)))
88#define CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, cpu_id, index) \
89 (IS_E1H_OFFSET ? (0x8118 + (port * 0x280) + (cpu_id * 0x28) + \
90 (index * 0x4)) : (0x3058 + (port * 0x280) + (cpu_id * 0x28) + \
91 (index * 0x4)))
92#define CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, cpu_id, index) \
93 (IS_E1H_OFFSET ? (0xb018 + (port * 0x800) + (cpu_id * 0x80) + \
94 (index * 0x4)) : (0x4018 + (port * 0x800) + (cpu_id * 0x80) + \
95 (index * 0x4)))
96#define CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, cpu_id) \
97 (IS_E1H_OFFSET ? (0x8100 + (port * 0x280) + (cpu_id * 0x28)) : \
98 (0x3040 + (port * 0x280) + (cpu_id * 0x28)))
99#define CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, cpu_id) \
100 (IS_E1H_OFFSET ? (0xb000 + (port * 0x800) + (cpu_id * 0x80)) : \
101 (0x4000 + (port * 0x800) + (cpu_id * 0x80)))
102#define CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, cpu_id) \
103 (IS_E1H_OFFSET ? (0x8108 + (port * 0x280) + (cpu_id * 0x28)) : \
104 (0x3048 + (port * 0x280) + (cpu_id * 0x28)))
105#define CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, cpu_id) \
106 (IS_E1H_OFFSET ? (0xb008 + (port * 0x800) + (cpu_id * 0x80)) : \
107 (0x4008 + (port * 0x800) + (cpu_id * 0x80)))
108#define CSTORM_SB_STATUS_BLOCK_C_SIZE 0x10
109#define CSTORM_SB_STATUS_BLOCK_U_SIZE 0x60
110#define CSTORM_STATS_FLAGS_OFFSET(function) \
111 (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
112 (function * 0x8)))
113#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
114 (IS_E1H_OFFSET ? (0x3200 + (function * 0x20)) : 0xffffffff)
115#define TSTORM_ASSERT_LIST_INDEX_OFFSET \
116 (IS_E1H_OFFSET ? 0xa000 : 0x1000)
117#define TSTORM_ASSERT_LIST_OFFSET(idx) \
118 (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
119#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
120 (IS_E1H_OFFSET ? (0x33a0 + (port * 0x1a0) + (client_id * 0x10)) \
121 : (0x9c0 + (port * 0x120) + (client_id * 0x10)))
122#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET \
123 (IS_E1H_OFFSET ? 0x1ed8 : 0xffffffff)
124#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
125 (IS_E1H_OFFSET ? 0x1eda : 0xffffffff)
126#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
127 (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \
128 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
129 0x28) + (index * 0x4)))
130#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
131 (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \
132 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
133#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
134 (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \
135 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
136#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
137 (IS_E1H_OFFSET ? (0x2940 + (function * 0x8)) : (0x4928 + \
138 (function * 0x8)))
139#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \
140 (IS_E1H_OFFSET ? (0x3000 + (function * 0x40)) : (0x1500 + \
141 (function * 0x40)))
142#define TSTORM_FUNCTION_MODE_OFFSET \
143 (IS_E1H_OFFSET ? 0x1ed0 : 0xffffffff)
144#define TSTORM_HC_BTR_OFFSET(port) \
145 (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
146#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \
147 (IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \
148 (function * 0x80)))
149#define TSTORM_INDIRECTION_TABLE_SIZE 0x80
150#define TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(function, pblEntry) \
151 (IS_E1H_OFFSET ? (0x60c0 + (function * 0x40) + (pblEntry * 0x8)) \
152 : (0x4c30 + (function * 0x40) + (pblEntry * 0x8)))
153#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \
154 (IS_E1H_OFFSET ? (0x6340 + (function * 0x8)) : (0x4cd0 + \
155 (function * 0x8)))
156#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
157 (IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x4c04 + \
158 (function * 0x8)))
159#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
160 (IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x4c02 + \
161 (function * 0x8)))
162#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
163 (IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x4c00 + \
164 (function * 0x8)))
165#define TSTORM_ISCSI_RQ_SIZE_OFFSET(function) \
166 (IS_E1H_OFFSET ? (0x6080 + (function * 0x8)) : (0x4c20 + \
167 (function * 0x8)))
168#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \
169 (IS_E1H_OFFSET ? (0x6040 + (function * 0x8)) : (0x4c10 + \
170 (function * 0x8)))
171#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(function) \
172 (IS_E1H_OFFSET ? (0x6042 + (function * 0x8)) : (0x4c12 + \
173 (function * 0x8)))
174#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(function) \
175 (IS_E1H_OFFSET ? (0x6044 + (function * 0x8)) : (0x4c14 + \
176 (function * 0x8)))
177#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \
178 (IS_E1H_OFFSET ? (0x3008 + (function * 0x40)) : (0x1508 + \
179 (function * 0x40)))
180#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
181 (IS_E1H_OFFSET ? (0x2010 + (port * 0x490) + (stats_counter_id * \
182 0x40)) : (0x4010 + (port * 0x490) + (stats_counter_id * 0x40)))
183#define TSTORM_STATS_FLAGS_OFFSET(function) \
184 (IS_E1H_OFFSET ? (0x29c0 + (function * 0x8)) : (0x4948 + \
185 (function * 0x8)))
186#define TSTORM_TCP_MAX_CWND_OFFSET(function) \
187 (IS_E1H_OFFSET ? (0x4004 + (function * 0x8)) : (0x1fb4 + \
188 (function * 0x8)))
189#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa000 : 0x3000)
190#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2000 : 0x1000)
191#define USTORM_ASSERT_LIST_INDEX_OFFSET \
192 (IS_E1H_OFFSET ? 0x8000 : 0x1000)
193#define USTORM_ASSERT_LIST_OFFSET(idx) \
194 (IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
195#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \
196 (IS_E1H_OFFSET ? (0x1010 + (port * 0x680) + (clientId * 0x40)) : \
197 (0x4010 + (port * 0x360) + (clientId * 0x30)))
198#define USTORM_CQE_PAGE_NEXT_OFFSET(port, clientId) \
199 (IS_E1H_OFFSET ? (0x1028 + (port * 0x680) + (clientId * 0x40)) : \
200 (0x4028 + (port * 0x360) + (clientId * 0x30)))
201#define USTORM_ETH_PAUSE_ENABLED_OFFSET(port) \
202 (IS_E1H_OFFSET ? (0x2ad4 + (port * 0x8)) : 0xffffffff)
203#define USTORM_ETH_RING_PAUSE_DATA_OFFSET(port, clientId) \
204 (IS_E1H_OFFSET ? (0x1030 + (port * 0x680) + (clientId * 0x40)) : \
205 0xffffffff)
206#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
207 (IS_E1H_OFFSET ? (0x2a50 + (function * 0x8)) : (0x1dd0 + \
208 (function * 0x8)))
209#define USTORM_FUNCTION_MODE_OFFSET \
210 (IS_E1H_OFFSET ? 0x2448 : 0xffffffff)
211#define USTORM_ISCSI_CQ_SIZE_OFFSET(function) \
212 (IS_E1H_OFFSET ? (0x7044 + (function * 0x8)) : (0x2414 + \
213 (function * 0x8)))
214#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \
215 (IS_E1H_OFFSET ? (0x7046 + (function * 0x8)) : (0x2416 + \
216 (function * 0x8)))
217#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \
218 (IS_E1H_OFFSET ? (0x7688 + (function * 0x8)) : (0x29c8 + \
219 (function * 0x8)))
220#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(function) \
221 (IS_E1H_OFFSET ? (0x7648 + (function * 0x8)) : (0x29b8 + \
222 (function * 0x8)))
223#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
224 (IS_E1H_OFFSET ? (0x7004 + (function * 0x8)) : (0x2404 + \
225 (function * 0x8)))
226#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
227 (IS_E1H_OFFSET ? (0x7002 + (function * 0x8)) : (0x2402 + \
228 (function * 0x8)))
229#define USTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
230 (IS_E1H_OFFSET ? (0x7000 + (function * 0x8)) : (0x2400 + \
231 (function * 0x8)))
232#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \
233 (IS_E1H_OFFSET ? (0x7040 + (function * 0x8)) : (0x2410 + \
234 (function * 0x8)))
235#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(function) \
236 (IS_E1H_OFFSET ? (0x7080 + (function * 0x8)) : (0x2420 + \
237 (function * 0x8)))
238#define USTORM_ISCSI_RQ_SIZE_OFFSET(function) \
239 (IS_E1H_OFFSET ? (0x7084 + (function * 0x8)) : (0x2424 + \
240 (function * 0x8)))
241#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \
242 (IS_E1H_OFFSET ? (0x1018 + (port * 0x680) + (clientId * 0x40)) : \
243 (0x4018 + (port * 0x360) + (clientId * 0x30)))
244#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \
245 (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x1da8 + \
246 (function * 0x8)))
247#define USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
248 (IS_E1H_OFFSET ? (0x2450 + (port * 0x2d0) + (stats_counter_id * \
249 0x28)) : (0x1500 + (port * 0x2d0) + (stats_counter_id * 0x28)))
250#define USTORM_RX_PRODS_OFFSET(port, client_id) \
251 (IS_E1H_OFFSET ? (0x1000 + (port * 0x680) + (client_id * 0x40)) \
252 : (0x4000 + (port * 0x360) + (client_id * 0x30)))
253#define USTORM_STATS_FLAGS_OFFSET(function) \
254 (IS_E1H_OFFSET ? (0x29f0 + (function * 0x8)) : (0x1db8 + \
255 (function * 0x8)))
256#define USTORM_TPA_BTR_OFFSET (IS_E1H_OFFSET ? 0x3da5 : 0x5095)
257#define USTORM_TPA_BTR_SIZE 0x1
258#define XSTORM_ASSERT_LIST_INDEX_OFFSET \
259 (IS_E1H_OFFSET ? 0x9000 : 0x1000)
260#define XSTORM_ASSERT_LIST_OFFSET(idx) \
261 (IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
262#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \
263 (IS_E1H_OFFSET ? (0x24a8 + (port * 0x50)) : (0x3a80 + (port * 0x50)))
264#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
265 (IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \
266 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
267 0x28) + (index * 0x4)))
268#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
269 (IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \
270 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
271#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
272 (IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \
273 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
274#define XSTORM_E1HOV_OFFSET(function) \
275 (IS_E1H_OFFSET ? (0x2c10 + (function * 0x8)) : 0xffffffff)
276#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
277 (IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3a50 + \
278 (function * 0x8)))
279#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \
280 (IS_E1H_OFFSET ? (0x2588 + (function * 0x90)) : (0x3b60 + \
281 (function * 0x90)))
282#define XSTORM_FUNCTION_MODE_OFFSET \
283 (IS_E1H_OFFSET ? 0x2c50 : 0xffffffff)
284#define XSTORM_HC_BTR_OFFSET(port) \
285 (IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
286#define XSTORM_ISCSI_HQ_SIZE_OFFSET(function) \
287 (IS_E1H_OFFSET ? (0x80c0 + (function * 0x8)) : (0x1c30 + \
288 (function * 0x8)))
289#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(function) \
290 (IS_E1H_OFFSET ? (0x8080 + (function * 0x8)) : (0x1c20 + \
291 (function * 0x8)))
292#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(function) \
293 (IS_E1H_OFFSET ? (0x8081 + (function * 0x8)) : (0x1c21 + \
294 (function * 0x8)))
295#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(function) \
296 (IS_E1H_OFFSET ? (0x8082 + (function * 0x8)) : (0x1c22 + \
297 (function * 0x8)))
298#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(function) \
299 (IS_E1H_OFFSET ? (0x8083 + (function * 0x8)) : (0x1c23 + \
300 (function * 0x8)))
301#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(function) \
302 (IS_E1H_OFFSET ? (0x8084 + (function * 0x8)) : (0x1c24 + \
303 (function * 0x8)))
304#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(function) \
305 (IS_E1H_OFFSET ? (0x8085 + (function * 0x8)) : (0x1c25 + \
306 (function * 0x8)))
307#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(function) \
308 (IS_E1H_OFFSET ? (0x8086 + (function * 0x8)) : (0x1c26 + \
309 (function * 0x8)))
310#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
311 (IS_E1H_OFFSET ? (0x8004 + (function * 0x8)) : (0x1c04 + \
312 (function * 0x8)))
313#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
314 (IS_E1H_OFFSET ? (0x8002 + (function * 0x8)) : (0x1c02 + \
315 (function * 0x8)))
316#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
317 (IS_E1H_OFFSET ? (0x8000 + (function * 0x8)) : (0x1c00 + \
318 (function * 0x8)))
319#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \
320 (IS_E1H_OFFSET ? (0x80c4 + (function * 0x8)) : (0x1c34 + \
321 (function * 0x8)))
322#define XSTORM_ISCSI_SQ_SIZE_OFFSET(function) \
323 (IS_E1H_OFFSET ? (0x80c2 + (function * 0x8)) : (0x1c32 + \
324 (function * 0x8)))
325#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(function) \
326 (IS_E1H_OFFSET ? (0x8043 + (function * 0x8)) : (0x1c13 + \
327 (function * 0x8)))
328#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \
329 (IS_E1H_OFFSET ? (0x8042 + (function * 0x8)) : (0x1c12 + \
330 (function * 0x8)))
331#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(function) \
332 (IS_E1H_OFFSET ? (0x8041 + (function * 0x8)) : (0x1c11 + \
333 (function * 0x8)))
334#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(function) \
335 (IS_E1H_OFFSET ? (0x8040 + (function * 0x8)) : (0x1c10 + \
336 (function * 0x8)))
337#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
338 (IS_E1H_OFFSET ? (0xc000 + (port * 0x360) + (stats_counter_id * \
339 0x30)) : (0x3378 + (port * 0x360) + (stats_counter_id * 0x30)))
340#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
341 (IS_E1H_OFFSET ? (0x2548 + (function * 0x90)) : (0x3b20 + \
342 (function * 0x90)))
343#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
344 (IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
345 (function * 0x10)))
346#define XSTORM_SPQ_PROD_OFFSET(function) \
347 (IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
348 (function * 0x10)))
349#define XSTORM_STATS_FLAGS_OFFSET(function) \
350 (IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3a40 + \
351 (function * 0x8)))
352#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port) \
353 (IS_E1H_OFFSET ? (0x4000 + (port * 0x8)) : (0x1960 + (port * 0x8)))
354#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port) \
355 (IS_E1H_OFFSET ? (0x4001 + (port * 0x8)) : (0x1961 + (port * 0x8)))
356#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(function) \
357 (IS_E1H_OFFSET ? (0x4060 + ((function>>1) * 0x8) + ((function&1) \
358 * 0x4)) : (0x1978 + (function * 0x4)))
359#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
360
361/**
362* This file defines HSI constants for the ETH flow
363*/
364#ifdef _EVEREST_MICROCODE
365#include "microcode_constants.h"
366#include "eth_rx_bd.h"
367#include "eth_tx_bd.h"
368#include "eth_rx_cqe.h"
369#include "eth_rx_sge.h"
370#include "eth_rx_cqe_next_page.h"
371#endif
372
373/* RSS hash types */
374#define DEFAULT_HASH_TYPE 0
375#define IPV4_HASH_TYPE 1
376#define TCP_IPV4_HASH_TYPE 2
377#define IPV6_HASH_TYPE 3
378#define TCP_IPV6_HASH_TYPE 4
379#define VLAN_PRI_HASH_TYPE 5
380#define E1HOV_PRI_HASH_TYPE 6
381#define DSCP_HASH_TYPE 7
382
383
384/* Ethernet Ring parameters */
385#define X_ETH_LOCAL_RING_SIZE 13
386#define FIRST_BD_IN_PKT 0
387#define PARSE_BD_INDEX 1
388#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
389#define U_ETH_NUM_OF_SGES_TO_FETCH 8
390#define U_ETH_MAX_SGES_FOR_PACKET 3
391
392/* Rx ring params */
393#define U_ETH_LOCAL_BD_RING_SIZE 8
394#define U_ETH_LOCAL_SGE_RING_SIZE 10
395#define U_ETH_SGL_SIZE 8
396
397
398#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
399 (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
400
401#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
402#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
403#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
404
405#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
406#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
407#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
408
409#define U_ETH_UNDEFINED_Q 0xFF
410
411/* values of command IDs in the ramrod message */
412#define RAMROD_CMD_ID_ETH_PORT_SETUP 80
413#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 85
414#define RAMROD_CMD_ID_ETH_STAT_QUERY 90
415#define RAMROD_CMD_ID_ETH_UPDATE 100
416#define RAMROD_CMD_ID_ETH_HALT 105
417#define RAMROD_CMD_ID_ETH_SET_MAC 110
418#define RAMROD_CMD_ID_ETH_CFC_DEL 115
419#define RAMROD_CMD_ID_ETH_PORT_DEL 120
420#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 125
421
422
423/* command values for set mac command */
424#define T_ETH_MAC_COMMAND_SET 0
425#define T_ETH_MAC_COMMAND_INVALIDATE 1
426
427#define T_ETH_INDIRECTION_TABLE_SIZE 128
428
429/* The CRC32 seed that is used for the hash (reduction) of the multicast address */
430#define T_ETH_CRC32_HASH_SEED 0x00000000
431
432/* Maximal L2 clients supported */
433#define ETH_MAX_RX_CLIENTS_E1 18
434#define ETH_MAX_RX_CLIENTS_E1H 26
435
436/* Maximal aggregation queues supported */
437#define ETH_MAX_AGGREGATION_QUEUES_E1 32
438#define ETH_MAX_AGGREGATION_QUEUES_E1H 64
439
440/* ETH RSS modes */
441#define ETH_RSS_MODE_DISABLED 0
442#define ETH_RSS_MODE_REGULAR 1
443#define ETH_RSS_MODE_VLAN_PRI 2
444#define ETH_RSS_MODE_E1HOV_PRI 3
445#define ETH_RSS_MODE_IP_DSCP 4
446
447
448/**
449* This file defines HSI constants common to all microcode flows
450*/
451
452/* Connection types */
453#define ETH_CONNECTION_TYPE 0
454#define TOE_CONNECTION_TYPE 1
455#define RDMA_CONNECTION_TYPE 2
456#define ISCSI_CONNECTION_TYPE 3
457#define FCOE_CONNECTION_TYPE 4
458#define RESERVED_CONNECTION_TYPE_0 5
459#define RESERVED_CONNECTION_TYPE_1 6
460#define RESERVED_CONNECTION_TYPE_2 7
461
462
463#define PROTOCOL_STATE_BIT_OFFSET 6
464
465#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
466#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
467#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
468
469/* microcode fixed page size 4K (chains and ring segments) */
470#define MC_PAGE_SIZE 4096
471
472
473/* Host coalescing constants */
474#define HC_IGU_BC_MODE 0
475#define HC_IGU_NBC_MODE 1
476
477#define HC_REGULAR_SEGMENT 0
478#define HC_DEFAULT_SEGMENT 1
479
480/* index numbers */
481#define HC_USTORM_DEF_SB_NUM_INDICES 8
482#define HC_CSTORM_DEF_SB_NUM_INDICES 8
483#define HC_XSTORM_DEF_SB_NUM_INDICES 4
484#define HC_TSTORM_DEF_SB_NUM_INDICES 4
485#define HC_USTORM_SB_NUM_INDICES 4
486#define HC_CSTORM_SB_NUM_INDICES 4
487
488/* index values - which counter to update */
489
490#define HC_INDEX_U_TOE_RX_CQ_CONS 0
491#define HC_INDEX_U_ETH_RX_CQ_CONS 1
492#define HC_INDEX_U_ETH_RX_BD_CONS 2
493#define HC_INDEX_U_FCOE_EQ_CONS 3
494
495#define HC_INDEX_C_TOE_TX_CQ_CONS 0
496#define HC_INDEX_C_ETH_TX_CQ_CONS 1
497#define HC_INDEX_C_ISCSI_EQ_CONS 2
498
499#define HC_INDEX_DEF_X_SPQ_CONS 0
500
501#define HC_INDEX_DEF_C_RDMA_EQ_CONS 0
502#define HC_INDEX_DEF_C_RDMA_NAL_PROD 1
503#define HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS 2
504#define HC_INDEX_DEF_C_ETH_SLOW_PATH 3
505#define HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS 4
506#define HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS 5
507#define HC_INDEX_DEF_C_ETH_FCOE_CQ_CONS 6
508
509#define HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS 0
510#define HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS 1
511#define HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS 2
512#define HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS 3
513#define HC_INDEX_DEF_U_ETH_FCOE_RX_CQ_CONS 4
514#define HC_INDEX_DEF_U_ETH_FCOE_RX_BD_CONS 5
515
516/* used by the driver to get the SB offset */
517#define USTORM_ID 0
518#define CSTORM_ID 1
519#define XSTORM_ID 2
520#define TSTORM_ID 3
521#define ATTENTION_ID 4
522
523/* max number of slow path commands per port */
524#define MAX_RAMRODS_PER_PORT 8
525
526/* values for RX ETH CQE type field */
527#define RX_ETH_CQE_TYPE_ETH_FASTPATH 0
528#define RX_ETH_CQE_TYPE_ETH_RAMROD 1
529
530
531/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
532#define EMULATION_FREQUENCY_FACTOR 1600
533#define FPGA_FREQUENCY_FACTOR 100
534
535#define TIMERS_TICK_SIZE_CHIP (1e-3)
536#define TIMERS_TICK_SIZE_EMUL \
537 ((TIMERS_TICK_SIZE_CHIP)/((EMULATION_FREQUENCY_FACTOR)))
538#define TIMERS_TICK_SIZE_FPGA \
539 ((TIMERS_TICK_SIZE_CHIP)/((FPGA_FREQUENCY_FACTOR)))
540
541#define TSEMI_CLK1_RESUL_CHIP (1e-3)
542#define TSEMI_CLK1_RESUL_EMUL \
543 ((TSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
544#define TSEMI_CLK1_RESUL_FPGA \
545 ((TSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
546
547#define USEMI_CLK1_RESUL_CHIP (TIMERS_TICK_SIZE_CHIP)
548#define USEMI_CLK1_RESUL_EMUL (TIMERS_TICK_SIZE_EMUL)
549#define USEMI_CLK1_RESUL_FPGA (TIMERS_TICK_SIZE_FPGA)
550
551#define XSEMI_CLK1_RESUL_CHIP (1e-3)
552#define XSEMI_CLK1_RESUL_EMUL \
553 ((XSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
554#define XSEMI_CLK1_RESUL_FPGA \
555 ((XSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
556
557#define XSEMI_CLK2_RESUL_CHIP (1e-6)
558#define XSEMI_CLK2_RESUL_EMUL \
559 ((XSEMI_CLK2_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
560#define XSEMI_CLK2_RESUL_FPGA \
561 ((XSEMI_CLK2_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
562
563#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6))
564#define SDM_TIMER_TICK_RESUL_EMUL \
565 ((SDM_TIMER_TICK_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
566#define SDM_TIMER_TICK_RESUL_FPGA \
567 ((SDM_TIMER_TICK_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
568
569
570/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
571#define XSTORM_IP_ID_ROLL_HALF 0x8000
572#define XSTORM_IP_ID_ROLL_ALL 0
573
574#define FW_LOG_LIST_SIZE 50
575
576#define NUM_OF_PROTOCOLS 4
577#define NUM_OF_SAFC_BITS 16
578#define MAX_COS_NUMBER 4
579#define MAX_T_STAT_COUNTER_ID 18
580#define MAX_X_STAT_COUNTER_ID 18
581#define MAX_U_STAT_COUNTER_ID 18
582
583
584#define UNKNOWN_ADDRESS 0
585#define UNICAST_ADDRESS 1
586#define MULTICAST_ADDRESS 2
587#define BROADCAST_ADDRESS 3
588
589#define SINGLE_FUNCTION 0
590#define MULTI_FUNCTION 1
591
592#define IP_V4 0
593#define IP_V6 1
594
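
The per-function macros above resolve to byte offsets inside each storm's internal memory window, with IS_E1H_OFFSET selecting between the 57710 (E1) and 57711 (E1H) layouts. As a rough sketch of their intended use (assuming struct bnx2x, REG_WR() and BAR_XSTRORM_INTMEM from bnx2x.h; none of these are defined in this header), a driver might publish its slow-path queue producer like this:

	/* Illustrative only: publish the slow-path queue producer of a given
	 * function. XSTORM_SPQ_PROD_OFFSET() is defined above and already
	 * accounts for the E1 vs E1H memory map via IS_E1H_OFFSET. */
	static inline void example_spq_prod_update(struct bnx2x *bp, int func,
						   u16 prod)
	{
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		       prod);
	}
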
diff --git a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
new file mode 100644
index 00000000000..3f5ee5d7cc2
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
@@ -0,0 +1,37 @@
1/* bnx2x_fw_file_hdr.h: FW binary file header structure.
2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Vladislav Zolotarov <vladz@broadcom.com>
11 * Based on the original idea of John Wright <john.wright@hp.com>.
12 */
13
14#ifndef BNX2X_INIT_FILE_HDR_H
15#define BNX2X_INIT_FILE_HDR_H
16
17struct bnx2x_fw_file_section {
18 __be32 len;
19 __be32 offset;
20};
21
22struct bnx2x_fw_file_hdr {
23 struct bnx2x_fw_file_section init_ops;
24 struct bnx2x_fw_file_section init_ops_offsets;
25 struct bnx2x_fw_file_section init_data;
26 struct bnx2x_fw_file_section tsem_int_table_data;
27 struct bnx2x_fw_file_section tsem_pram_data;
28 struct bnx2x_fw_file_section usem_int_table_data;
29 struct bnx2x_fw_file_section usem_pram_data;
30 struct bnx2x_fw_file_section csem_int_table_data;
31 struct bnx2x_fw_file_section csem_pram_data;
32 struct bnx2x_fw_file_section xsem_int_table_data;
33 struct bnx2x_fw_file_section xsem_pram_data;
34 struct bnx2x_fw_file_section fw_version;
35};
36
37#endif /* BNX2X_INIT_FILE_HDR_H */
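
The structure above is the table of contents of the firmware blob the driver loads with request_firmware(): each section records a big-endian length and byte offset into the blob, and the header itself sits at offset 0. A minimal sketch of locating one section, assuming only the standard <linux/firmware.h> and byte-order helpers:

	/* Sketch: return a pointer to one section of a loaded firmware blob,
	 * or NULL if the recorded offset/length do not fit inside the file. */
	static const u8 *example_fw_section(const struct firmware *fw,
					    const struct bnx2x_fw_file_section *sec)
	{
		u32 off = be32_to_cpu(sec->offset);
		u32 len = be32_to_cpu(sec->len);

		if (off > fw->size || len > fw->size - off)
			return NULL;
		return fw->data + off;
	}

	/* Typical use:
	 *	const struct bnx2x_fw_file_hdr *hdr = (const void *)fw->data;
	 *	const u8 *init_data = example_fw_section(fw, &hdr->init_data);
	 */
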
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
new file mode 100644
index 00000000000..fd1f29e0317
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -0,0 +1,3138 @@
1/* bnx2x_hsi.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9
10struct license_key {
11 u32 reserved[6];
12
13#if defined(__BIG_ENDIAN)
14 u16 max_iscsi_init_conn;
15 u16 max_iscsi_trgt_conn;
16#elif defined(__LITTLE_ENDIAN)
17 u16 max_iscsi_trgt_conn;
18 u16 max_iscsi_init_conn;
19#endif
20
21 u32 reserved_a[6];
22};
23
24
25#define PORT_0 0
26#define PORT_1 1
27#define PORT_MAX 2
28
29/****************************************************************************
30 * Shared HW configuration *
31 ****************************************************************************/
32struct shared_hw_cfg { /* NVRAM Offset */
33 /* Up to 16 bytes of NULL-terminated string */
34 u8 part_num[16]; /* 0x104 */
35
36 u32 config; /* 0x114 */
37#define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001
38#define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0
39#define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000
40#define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001
41#define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002
42
43#define SHARED_HW_CFG_PORT_SWAP 0x00000004
44
45#define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008
46
47#define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700
48#define SHARED_HW_CFG_MFW_SELECT_SHIFT 8
49	/* Use whichever MFW is found in NVM
50	   (if multiple are found, the priority order is: NC-SI, UMP, IPMI) */
51#define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000
52#define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100
53#define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200
54#define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300
55 /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI
56 (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
57#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400
58 /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI
59 (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
60#define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500
61 /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP
62 (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
63#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600
64
65#define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000
66#define SHARED_HW_CFG_LED_MODE_SHIFT 16
67#define SHARED_HW_CFG_LED_MAC1 0x00000000
68#define SHARED_HW_CFG_LED_PHY1 0x00010000
69#define SHARED_HW_CFG_LED_PHY2 0x00020000
70#define SHARED_HW_CFG_LED_PHY3 0x00030000
71#define SHARED_HW_CFG_LED_MAC2 0x00040000
72#define SHARED_HW_CFG_LED_PHY4 0x00050000
73#define SHARED_HW_CFG_LED_PHY5 0x00060000
74#define SHARED_HW_CFG_LED_PHY6 0x00070000
75#define SHARED_HW_CFG_LED_MAC3 0x00080000
76#define SHARED_HW_CFG_LED_PHY7 0x00090000
77#define SHARED_HW_CFG_LED_PHY9 0x000a0000
78#define SHARED_HW_CFG_LED_PHY11 0x000b0000
79#define SHARED_HW_CFG_LED_MAC4 0x000c0000
80#define SHARED_HW_CFG_LED_PHY8 0x000d0000
81
82#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
83#define SHARED_HW_CFG_AN_ENABLE_SHIFT 24
84#define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000
85#define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000
86#define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000
87#define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000
88#define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000
89#define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000
90
91 u32 config2; /* 0x118 */
92 /* one time auto detect grace period (in sec) */
93#define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff
94#define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0
95
96#define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100
97
98 /* The default value for the core clock is 250MHz and it is
99 achieved by setting the clock change to 4 */
100#define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00
101#define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9
102
103#define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000
104#define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000
105
106#define SHARED_HW_CFG_HIDE_PORT1 0x00002000
107
108 /* The fan failure mechanism is usually related to the PHY type
109 since the power consumption of the board is determined by the PHY.
110	   Currently, a fan is required for most designs with SFX7101, BCM8727
111 and BCM8481. If a fan is not required for a board which uses one
112 of those PHYs, this field should be set to "Disabled". If a fan is
113 required for a different PHY type, this option should be set to
114 "Enabled".
115 The fan failure indication is expected on
116 SPIO5 */
117#define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000
118#define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19
119#define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000
120#define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
121#define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
122
123 u32 power_dissipated; /* 0x11c */
124#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
125#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24
126
127#define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000
128#define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16
129#define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000
130#define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000
131#define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000
132#define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000
133
134 u32 ump_nc_si_config; /* 0x120 */
135#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003
136#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0
137#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000
138#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001
139#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000
140#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002
141
142#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00
143#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8
144
145#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000
146#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16
147#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000
148#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000
149
150 u32 board; /* 0x124 */
151#define SHARED_HW_CFG_BOARD_REV_MASK 0x00FF0000
152#define SHARED_HW_CFG_BOARD_REV_SHIFT 16
153
154#define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0F000000
155#define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24
156
157#define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xF0000000
158#define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28
159
160 u32 reserved; /* 0x128 */
161
162};
163
164
165/****************************************************************************
166 * Port HW configuration *
167 ****************************************************************************/
168struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
169
170 u32 pci_id;
171#define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000
172#define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff
173
174 u32 pci_sub_id;
175#define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000
176#define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff
177
178 u32 power_dissipated;
179#define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000
180#define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24
181#define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000
182#define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16
183#define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00
184#define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8
185#define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff
186#define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0
187
188 u32 power_consumed;
189#define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000
190#define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24
191#define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000
192#define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16
193#define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00
194#define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8
195#define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff
196#define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0
197
198 u32 mac_upper;
199#define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff
200#define PORT_HW_CFG_UPPERMAC_SHIFT 0
201 u32 mac_lower;
202
203 u32 iscsi_mac_upper; /* Upper 16 bits are always zeroes */
204 u32 iscsi_mac_lower;
205
206 u32 rdma_mac_upper; /* Upper 16 bits are always zeroes */
207 u32 rdma_mac_lower;
208
209 u32 serdes_config;
210#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000FFFF
211#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0
212
213#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xFFFF0000
214#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
215
216
217 u32 Reserved0[16]; /* 0x158 */
218
219 /* for external PHY, or forced mode or during AN */
220 u16 xgxs_config_rx[4]; /* 0x198 */
221
222 u16 xgxs_config_tx[4]; /* 0x1A0 */
223
224 u32 Reserved1[64]; /* 0x1A8 */
225
226 u32 lane_config;
227#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
228#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0
229#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff
230#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
231#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00
232#define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
233#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000
234#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14
235 /* AN and forced */
236#define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b
237 /* forced only */
238#define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4
239 /* forced only */
240#define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
241 /* forced only */
242#define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
243
244 u32 external_phy_config;
245#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000
246#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24
247#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000
248#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000
249#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000
250
251#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000
252#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16
253
254#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00
255#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8
256#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000
257#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100
258#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200
259#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300
260#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400
261#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500
262#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600
263#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700
264#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800
265#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
266#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
267#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
268#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
269#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
270
271#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff
272#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0
273
274 u32 speed_capability_mask;
275#define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000
276#define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16
277#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000
278#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000
279#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000
280#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000
281#define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000
282#define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000
283#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000
284#define PORT_HW_CFG_SPEED_CAPABILITY_D0_12G 0x00800000
285#define PORT_HW_CFG_SPEED_CAPABILITY_D0_12_5G 0x01000000
286#define PORT_HW_CFG_SPEED_CAPABILITY_D0_13G 0x02000000
287#define PORT_HW_CFG_SPEED_CAPABILITY_D0_15G 0x04000000
288#define PORT_HW_CFG_SPEED_CAPABILITY_D0_16G 0x08000000
289#define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000
290
291#define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff
292#define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0
293#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001
294#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002
295#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004
296#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008
297#define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010
298#define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020
299#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040
300#define PORT_HW_CFG_SPEED_CAPABILITY_D3_12G 0x00000080
301#define PORT_HW_CFG_SPEED_CAPABILITY_D3_12_5G 0x00000100
302#define PORT_HW_CFG_SPEED_CAPABILITY_D3_13G 0x00000200
303#define PORT_HW_CFG_SPEED_CAPABILITY_D3_15G 0x00000400
304#define PORT_HW_CFG_SPEED_CAPABILITY_D3_16G 0x00000800
305#define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000
306
307 u32 reserved[2];
308
309};
310
311
312/****************************************************************************
313 * Shared Feature configuration *
314 ****************************************************************************/
315struct shared_feat_cfg { /* NVRAM Offset */
316
317 u32 config; /* 0x450 */
318#define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001
319
320 /* Use the values from options 47 and 48 instead of the HW default
321 values */
322#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000
323#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002
324
325#define SHARED_FEATURE_MF_MODE_DISABLED 0x00000100
326
327};
328
329
330/****************************************************************************
331 * Port Feature configuration *
332 ****************************************************************************/
333struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
334
335 u32 config;
336#define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f
337#define PORT_FEATURE_BAR1_SIZE_SHIFT 0
338#define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000
339#define PORT_FEATURE_BAR1_SIZE_64K 0x00000001
340#define PORT_FEATURE_BAR1_SIZE_128K 0x00000002
341#define PORT_FEATURE_BAR1_SIZE_256K 0x00000003
342#define PORT_FEATURE_BAR1_SIZE_512K 0x00000004
343#define PORT_FEATURE_BAR1_SIZE_1M 0x00000005
344#define PORT_FEATURE_BAR1_SIZE_2M 0x00000006
345#define PORT_FEATURE_BAR1_SIZE_4M 0x00000007
346#define PORT_FEATURE_BAR1_SIZE_8M 0x00000008
347#define PORT_FEATURE_BAR1_SIZE_16M 0x00000009
348#define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a
349#define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b
350#define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c
351#define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d
352#define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e
353#define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f
354#define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0
355#define PORT_FEATURE_BAR2_SIZE_SHIFT 4
356#define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000
357#define PORT_FEATURE_BAR2_SIZE_64K 0x00000010
358#define PORT_FEATURE_BAR2_SIZE_128K 0x00000020
359#define PORT_FEATURE_BAR2_SIZE_256K 0x00000030
360#define PORT_FEATURE_BAR2_SIZE_512K 0x00000040
361#define PORT_FEATURE_BAR2_SIZE_1M 0x00000050
362#define PORT_FEATURE_BAR2_SIZE_2M 0x00000060
363#define PORT_FEATURE_BAR2_SIZE_4M 0x00000070
364#define PORT_FEATURE_BAR2_SIZE_8M 0x00000080
365#define PORT_FEATURE_BAR2_SIZE_16M 0x00000090
366#define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0
367#define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0
368#define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0
369#define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0
370#define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0
371#define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0
372#define PORT_FEATURE_EN_SIZE_MASK 0x07000000
373#define PORT_FEATURE_EN_SIZE_SHIFT 24
374#define PORT_FEATURE_WOL_ENABLED 0x01000000
375#define PORT_FEATURE_MBA_ENABLED 0x02000000
376#define PORT_FEATURE_MFW_ENABLED 0x04000000
377
378 /* Reserved bits: 28-29 */
379 /* Check the optic vendor via i2c against a list of approved modules
380 in a separate nvram image */
381#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xE0000000
382#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29
383#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT 0x00000000
384#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER 0x20000000
385#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000
386#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000
387
388
389 u32 wol_config;
390 /* Default is used when driver sets to "auto" mode */
391#define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003
392#define PORT_FEATURE_WOL_DEFAULT_SHIFT 0
393#define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000
394#define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001
395#define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002
396#define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003
397#define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004
398#define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008
399#define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010
400
401 u32 mba_config;
402#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000003
403#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0
404#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000
405#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001
406#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002
407#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003
408#define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100
409#define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200
410#define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400
411#define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000
412#define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800
413#define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000
414#define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12
415#define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000
416#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000
417#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000
418#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000
419#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000
420#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000
421#define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000
422#define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000
423#define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000
424#define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000
425#define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000
426#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000
427#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000
428#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000
429#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000
430#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000
431#define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000
432#define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20
433#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000
434#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24
435#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000
436#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000
437#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000
438#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000
439#define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000
440#define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26
441#define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000
442#define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000
443#define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000
444#define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000
445#define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000
446#define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000
447#define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000
448#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000
449#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KX4 0x20000000
450#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KR 0x24000000
451#define PORT_FEATURE_MBA_LINK_SPEED_12GBPS 0x28000000
452#define PORT_FEATURE_MBA_LINK_SPEED_12_5GBPS 0x2c000000
453#define PORT_FEATURE_MBA_LINK_SPEED_13GBPS 0x30000000
454#define PORT_FEATURE_MBA_LINK_SPEED_15GBPS 0x34000000
455#define PORT_FEATURE_MBA_LINK_SPEED_16GBPS 0x38000000
456
457 u32 bmc_config;
458#define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000
459#define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001
460
461 u32 mba_vlan_cfg;
462#define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff
463#define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0
464#define PORT_FEATURE_MBA_VLAN_EN 0x00010000
465
466 u32 resource_cfg;
467#define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001
468#define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002
469#define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004
470#define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008
471#define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010
472
473 u32 smbus_config;
474 /* Obsolete */
475#define PORT_FEATURE_SMBUS_EN 0x00000001
476#define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe
477#define PORT_FEATURE_SMBUS_ADDR_SHIFT 1
478
479 u32 reserved1;
480
481 u32 link_config; /* Used as HW defaults for the driver */
482#define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000
483#define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24
484 /* (forced) low speed switch (< 10G) */
485#define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000
486 /* (forced) high speed switch (>= 10G) */
487#define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000
488#define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000
489#define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000
490
491#define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000
492#define PORT_FEATURE_LINK_SPEED_SHIFT 16
493#define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000
494#define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000
495#define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000
496#define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000
497#define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000
498#define PORT_FEATURE_LINK_SPEED_1G 0x00050000
499#define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000
500#define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000
501#define PORT_FEATURE_LINK_SPEED_10G_KX4 0x00080000
502#define PORT_FEATURE_LINK_SPEED_10G_KR 0x00090000
503#define PORT_FEATURE_LINK_SPEED_12G 0x000a0000
504#define PORT_FEATURE_LINK_SPEED_12_5G 0x000b0000
505#define PORT_FEATURE_LINK_SPEED_13G 0x000c0000
506#define PORT_FEATURE_LINK_SPEED_15G 0x000d0000
507#define PORT_FEATURE_LINK_SPEED_16G 0x000e0000
508
509#define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700
510#define PORT_FEATURE_FLOW_CONTROL_SHIFT 8
511#define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000
512#define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100
513#define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200
514#define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300
515#define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
516
517 /* The default for MCP link configuration,
518 uses the same defines as link_config */
519 u32 mfw_wol_link_cfg;
520
521 u32 reserved[19];
522
523};
524
525
526/****************************************************************************
527 * Device Information *
528 ****************************************************************************/
529struct shm_dev_info { /* size */
530
531 u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */
532
533 struct shared_hw_cfg shared_hw_config; /* 40 */
534
535 struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */
536
537 struct shared_feat_cfg shared_feature_config; /* 4 */
538
539 struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */
540
541};
542
543
544#define FUNC_0 0
545#define FUNC_1 1
546#define FUNC_2 2
547#define FUNC_3 3
548#define FUNC_4 4
549#define FUNC_5 5
550#define FUNC_6 6
551#define FUNC_7 7
552#define E1_FUNC_MAX 2
553#define E1H_FUNC_MAX 8
554
555#define VN_0 0
556#define VN_1 1
557#define VN_2 2
558#define VN_3 3
559#define E1VN_MAX 1
560#define E1HVN_MAX 4
561
562
563/* This value (in milliseconds) determines the frequency of the driver
564 * issuing the PULSE message code. The firmware monitors this periodic
565 * pulse to determine when to switch to an OS-absent mode. */
566#define DRV_PULSE_PERIOD_MS 250
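
The comment above describes a simple heartbeat: roughly every DRV_PULSE_PERIOD_MS the driver bumps a sequence number in its mailbox and checks whether the management firmware is echoing it back. A hedged sketch of what such a timer callback might do, assuming the drv_pulse_mb/mcp_pulse_mb fields of struct drv_func_mb and the DRV_PULSE_SEQ_MASK/MCP_PULSE_SEQ_MASK bits defined further down in this file, plus SHMEM_WR()/SHMEM_RD() shared-memory accessors from bnx2x.h:

	/* Sketch of one pulse period. 'func' selects the per-function mailbox
	 * and 'seq' is the driver's running pulse sequence number. */
	static void example_drv_pulse(struct bnx2x *bp, int func, u16 *seq)
	{
		u16 drv_pulse, mcp_pulse;

		*seq = (*seq + 1) & DRV_PULSE_SEQ_MASK;
		drv_pulse = *seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		/* The MCP echoes the sequence; a widening gap means it has
		 * stopped seeing (or answering) the driver pulse. */
		mcp_pulse = SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			    MCP_PULSE_SEQ_MASK;
		if (((drv_pulse - mcp_pulse) & DRV_PULSE_SEQ_MASK) > 5) {
			/* the management firmware looks unresponsive;
			 * the real driver would log and/or recover here */
		}
	}
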
567
568/* This value (in milliseconds) determines how long the driver should
569 * wait for an acknowledgement from the firmware before timing out. Once
570 * this wait has timed out, the driver will assume there is no firmware
571 * running and there won't be any firmware-driver synchronization during a
572 * driver reset. */
573#define FW_ACK_TIME_OUT_MS 5000
574
575#define FW_ACK_POLL_TIME_MS 1
576
577#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
578
579/* LED Blink rate that will achieve ~15.9Hz */
580#define LED_BLINK_RATE_VAL 480
581
582/****************************************************************************
583 * Driver <-> FW Mailbox *
584 ****************************************************************************/
585struct drv_port_mb {
586
587 u32 link_status;
588 /* Driver should update this field on any link change event */
589
590#define LINK_STATUS_LINK_FLAG_MASK 0x00000001
591#define LINK_STATUS_LINK_UP 0x00000001
592#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
593#define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1)
594#define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1)
595#define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1)
596#define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1)
597#define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1)
598#define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1)
599#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1)
600#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1)
601#define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1)
602#define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1)
603#define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1)
604#define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1)
605#define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1)
606#define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1)
607#define LINK_STATUS_SPEED_AND_DUPLEX_12GTFD (11<<1)
608#define LINK_STATUS_SPEED_AND_DUPLEX_12GXFD (11<<1)
609#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD (12<<1)
610#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD (12<<1)
611#define LINK_STATUS_SPEED_AND_DUPLEX_13GTFD (13<<1)
612#define LINK_STATUS_SPEED_AND_DUPLEX_13GXFD (13<<1)
613#define LINK_STATUS_SPEED_AND_DUPLEX_15GTFD (14<<1)
614#define LINK_STATUS_SPEED_AND_DUPLEX_15GXFD (14<<1)
615#define LINK_STATUS_SPEED_AND_DUPLEX_16GTFD (15<<1)
616#define LINK_STATUS_SPEED_AND_DUPLEX_16GXFD (15<<1)
617
618#define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020
619#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
620
621#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
622#define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080
623#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
624
625#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
626#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
627#define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800
628#define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000
629#define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000
630#define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000
631#define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000
632
633#define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000
634#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000
635
636#define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000
637#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000
638
639#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
640#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18)
641#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18)
642#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18)
643#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18)
644
645#define LINK_STATUS_SERDES_LINK 0x00100000
646
647#define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000
648#define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000
649#define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000
650#define LINK_STATUS_LINK_PARTNER_12GXFD_CAPABLE 0x01000000
651#define LINK_STATUS_LINK_PARTNER_12_5GXFD_CAPABLE 0x02000000
652#define LINK_STATUS_LINK_PARTNER_13GXFD_CAPABLE 0x04000000
653#define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 0x08000000
654#define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 0x10000000
655
656 u32 port_stx;
657
658 u32 stat_nig_timer;
659
660 /* MCP firmware does not use this field */
661 u32 ext_phy_fw_version;
662
663};
664
665
666struct drv_func_mb {
667
668 u32 drv_mb_header;
669#define DRV_MSG_CODE_MASK 0xffff0000
670#define DRV_MSG_CODE_LOAD_REQ 0x10000000
671#define DRV_MSG_CODE_LOAD_DONE 0x11000000
672#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
673#define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
674#define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
675#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
676#define DRV_MSG_CODE_DCC_OK 0x30000000
677#define DRV_MSG_CODE_DCC_FAILURE 0x31000000
678#define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
679#define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
680#define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
681#define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
682#define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
683#define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
684#define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
685 /*
686 * The optic module verification commands require bootcode
687 * v5.0.6 or later
688 */
689#define DRV_MSG_CODE_VRFY_OPT_MDL 0xa0000000
690#define REQ_BC_VER_4_VRFY_OPT_MDL 0x00050006
691
692#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
693#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
694#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
695#define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
696
697#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
698
699 u32 drv_mb_param;
700
701 u32 fw_mb_header;
702#define FW_MSG_CODE_MASK 0xffff0000
703#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
704#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
705#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
706#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
707#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
708#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
709#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000
710#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000
711#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
712#define FW_MSG_CODE_DCC_DONE 0x30100000
713#define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000
714#define FW_MSG_CODE_DIAG_REFUSE 0x50200000
715#define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000
716#define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000
717#define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000
718#define FW_MSG_CODE_GET_KEY_DONE 0x80100000
719#define FW_MSG_CODE_NO_KEY 0x80f00000
720#define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000
721#define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000
722#define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000
723#define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000
724#define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000
725#define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000
726#define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000
727#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
728#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
729
730#define FW_MSG_CODE_LIC_CHALLENGE 0xff010000
731#define FW_MSG_CODE_LIC_RESPONSE 0xff020000
732#define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000
733#define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
734
735#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
736
737 u32 fw_mb_param;
738
739 u32 drv_pulse_mb;
740#define DRV_PULSE_SEQ_MASK 0x00007fff
741#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
742 /* The system time is in the format of
743 * (year-2001)*12*32 + month*32 + day. */
744#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
745	/* Indicates to the firmware not to go into
746	 * OS-absent mode when it is not receiving the driver pulse.
747	 * This is used for debugging as well as for PXE(MBA). */
748
749 u32 mcp_pulse_mb;
750#define MCP_PULSE_SEQ_MASK 0x00007fff
751#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
752 /* Indicates to the driver not to assert due to lack
753 * of MCP response */
754#define MCP_EVENT_MASK 0xffff0000
755#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
756
757 u32 iscsi_boot_signature;
758 u32 iscsi_boot_block_offset;
759
760 u32 drv_status;
761#define DRV_STATUS_PMF 0x00000001
762
763#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
764#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
765#define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200
766#define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400
767#define DRV_STATUS_DCC_RESERVED1 0x00000800
768#define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
769#define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
770
771 u32 virt_mac_upper;
772#define VIRT_MAC_SIGN_MASK 0xffff0000
773#define VIRT_MAC_SIGNATURE 0x564d0000
774 u32 virt_mac_lower;
775
776};
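
Taken together, drv_mb_header and fw_mb_header implement a simple command/acknowledge protocol: the driver writes a DRV_MSG_CODE_* value combined with an incrementing sequence number, then polls fw_mb_header until the firmware echoes that sequence alongside an FW_MSG_CODE_* reply, giving up after FW_ACK_NUM_OF_POLL attempts (with the constants defined earlier, 5000 polls of 1ms each, matching the 5000ms FW_ACK_TIME_OUT_MS). A minimal sketch, assuming SHMEM_WR()/SHMEM_RD() mailbox accessors from bnx2x.h and msleep() from <linux/delay.h>:

	/* Sketch: issue one mailbox command and wait for the firmware ack.
	 * Returns the FW_MSG_CODE_* part of the reply, or 0 on timeout. */
	static u32 example_fw_command(struct bnx2x *bp, int func, u16 *seq,
				      u32 command)
	{
		u32 cnt = 0, rc;

		*seq = (*seq + 1) & DRV_MSG_SEQ_NUMBER_MASK;
		SHMEM_WR(bp, func_mb[func].drv_mb_header, command | *seq);

		do {
			msleep(FW_ACK_POLL_TIME_MS);
			rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
		} while (((rc & FW_MSG_SEQ_NUMBER_MASK) != *seq) &&
			 (++cnt < FW_ACK_NUM_OF_POLL));

		if ((rc & FW_MSG_SEQ_NUMBER_MASK) != *seq)
			return 0;	/* e.g. no MCP running */

		return rc & FW_MSG_CODE_MASK;
	}
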
777
778
779/****************************************************************************
780 * Management firmware state *
781 ****************************************************************************/
782/* Allocate 440 bytes for management firmware */
783#define MGMTFW_STATE_WORD_SIZE 110
784
785struct mgmtfw_state {
786 u32 opaque[MGMTFW_STATE_WORD_SIZE];
787};
788
789
790/****************************************************************************
791 * Multi-Function configuration *
792 ****************************************************************************/
793struct shared_mf_cfg {
794
795 u32 clp_mb;
796#define SHARED_MF_CLP_SET_DEFAULT 0x00000000
797 /* set by CLP */
798#define SHARED_MF_CLP_EXIT 0x00000001
799 /* set by MCP */
800#define SHARED_MF_CLP_EXIT_DONE 0x00010000
801
802};
803
804struct port_mf_cfg {
805
806 u32 dynamic_cfg; /* device control channel */
807#define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
808#define PORT_MF_CFG_E1HOV_TAG_SHIFT 0
809#define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK
810
811 u32 reserved[3];
812
813};
814
815struct func_mf_cfg {
816
817 u32 config;
818 /* E/R/I/D */
819 /* function 0 of each port cannot be hidden */
820#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
821
822#define FUNC_MF_CFG_PROTOCOL_MASK 0x00000007
823#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002
824#define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004
825#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006
826#define FUNC_MF_CFG_PROTOCOL_DEFAULT\
827 FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
828
829#define FUNC_MF_CFG_FUNC_DISABLED 0x00000008
830
831 /* PRI */
832 /* 0 - low priority, 3 - high priority */
833#define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300
834#define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8
835#define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000
836
837 /* MINBW, MAXBW */
838	/* value range - 0..100, in increments of 100Mbps */
839#define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000
840#define FUNC_MF_CFG_MIN_BW_SHIFT 16
841#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
842#define FUNC_MF_CFG_MAX_BW_MASK 0xff000000
843#define FUNC_MF_CFG_MAX_BW_SHIFT 24
844#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000
845
846 u32 mac_upper; /* MAC */
847#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
848#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
849#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
850 u32 mac_lower;
851#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
852
853 u32 e1hov_tag; /* VNI */
854#define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
855#define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
856#define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
857
858 u32 reserved[2];
859
860};
861
862struct mf_cfg {
863
864 struct shared_mf_cfg shared_mf_config;
865 struct port_mf_cfg port_mf_config[PORT_MAX];
866 struct func_mf_cfg func_mf_config[E1H_FUNC_MAX];
867
868};
869
870
871/****************************************************************************
872 * Shared Memory Region *
873 ****************************************************************************/
874struct shmem_region { /* SharedMem Offset (size) */
875
876 u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */
877#define SHR_MEM_FORMAT_REV_ID ('A'<<24)
878#define SHR_MEM_FORMAT_REV_MASK 0xff000000
879 /* validity bits */
880#define SHR_MEM_VALIDITY_PCI_CFG 0x00100000
881#define SHR_MEM_VALIDITY_MB 0x00200000
882#define SHR_MEM_VALIDITY_DEV_INFO 0x00400000
883#define SHR_MEM_VALIDITY_RESERVED 0x00000007
884 /* One licensing bit should be set */
885#define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
886#define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
887#define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
888#define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
889 /* Active MFW */
890#define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
891#define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040
892#define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080
893#define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0
894#define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
895#define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
896
897 struct shm_dev_info dev_info; /* 0x8 (0x438) */
898
899 struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
900
901 /* FW information (for internal FW use) */
902 u32 fw_info_fio_offset; /* 0x4a8 (0x4) */
903 struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
904
905 struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
906 struct drv_func_mb func_mb[E1H_FUNC_MAX];
907
908 struct mf_cfg mf_cfg;
909
910}; /* 0x6dc */
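
Because the MCP publishes this structure at a fixed base address in the device's GRC space, drivers normally do not copy it; they read and write individual fields by their offsetof() within struct shmem_region. A small sketch of that pattern, with 'shmem_base' (discovered at probe time), REG_RD() and offsetof() assumed from bnx2x.h and <linux/stddef.h>:

	/* Sketch: 32-bit read of an arbitrary shared-memory field. */
	#define EXAMPLE_SHMEM_RD(bp, shmem_base, field)			\
		REG_RD(bp, (shmem_base) +				\
		       offsetof(struct shmem_region, field))

	/* e.g. the link status the driver keeps up to date for port 0:
	 *	u32 ls = EXAMPLE_SHMEM_RD(bp, shmem_base, port_mb[0].link_status);
	 */
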
911
912
913struct shmem2_region {
914
915 u32 size;
916
917 u32 dcc_support;
918#define SHMEM_DCC_SUPPORT_NONE 0x00000000
919#define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001
920#define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004
921#define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008
922#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
923#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
924#define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE
925
926};
927
928
929struct emac_stats {
930 u32 rx_stat_ifhcinoctets;
931 u32 rx_stat_ifhcinbadoctets;
932 u32 rx_stat_etherstatsfragments;
933 u32 rx_stat_ifhcinucastpkts;
934 u32 rx_stat_ifhcinmulticastpkts;
935 u32 rx_stat_ifhcinbroadcastpkts;
936 u32 rx_stat_dot3statsfcserrors;
937 u32 rx_stat_dot3statsalignmenterrors;
938 u32 rx_stat_dot3statscarriersenseerrors;
939 u32 rx_stat_xonpauseframesreceived;
940 u32 rx_stat_xoffpauseframesreceived;
941 u32 rx_stat_maccontrolframesreceived;
942 u32 rx_stat_xoffstateentered;
943 u32 rx_stat_dot3statsframestoolong;
944 u32 rx_stat_etherstatsjabbers;
945 u32 rx_stat_etherstatsundersizepkts;
946 u32 rx_stat_etherstatspkts64octets;
947 u32 rx_stat_etherstatspkts65octetsto127octets;
948 u32 rx_stat_etherstatspkts128octetsto255octets;
949 u32 rx_stat_etherstatspkts256octetsto511octets;
950 u32 rx_stat_etherstatspkts512octetsto1023octets;
951 u32 rx_stat_etherstatspkts1024octetsto1522octets;
952 u32 rx_stat_etherstatspktsover1522octets;
953
954 u32 rx_stat_falsecarriererrors;
955
956 u32 tx_stat_ifhcoutoctets;
957 u32 tx_stat_ifhcoutbadoctets;
958 u32 tx_stat_etherstatscollisions;
959 u32 tx_stat_outxonsent;
960 u32 tx_stat_outxoffsent;
961 u32 tx_stat_flowcontroldone;
962 u32 tx_stat_dot3statssinglecollisionframes;
963 u32 tx_stat_dot3statsmultiplecollisionframes;
964 u32 tx_stat_dot3statsdeferredtransmissions;
965 u32 tx_stat_dot3statsexcessivecollisions;
966 u32 tx_stat_dot3statslatecollisions;
967 u32 tx_stat_ifhcoutucastpkts;
968 u32 tx_stat_ifhcoutmulticastpkts;
969 u32 tx_stat_ifhcoutbroadcastpkts;
970 u32 tx_stat_etherstatspkts64octets;
971 u32 tx_stat_etherstatspkts65octetsto127octets;
972 u32 tx_stat_etherstatspkts128octetsto255octets;
973 u32 tx_stat_etherstatspkts256octetsto511octets;
974 u32 tx_stat_etherstatspkts512octetsto1023octets;
975 u32 tx_stat_etherstatspkts1024octetsto1522octets;
976 u32 tx_stat_etherstatspktsover1522octets;
977 u32 tx_stat_dot3statsinternalmactransmiterrors;
978};
979
980
981struct bmac_stats {
982 u32 tx_stat_gtpkt_lo;
983 u32 tx_stat_gtpkt_hi;
984 u32 tx_stat_gtxpf_lo;
985 u32 tx_stat_gtxpf_hi;
986 u32 tx_stat_gtfcs_lo;
987 u32 tx_stat_gtfcs_hi;
988 u32 tx_stat_gtmca_lo;
989 u32 tx_stat_gtmca_hi;
990 u32 tx_stat_gtbca_lo;
991 u32 tx_stat_gtbca_hi;
992 u32 tx_stat_gtfrg_lo;
993 u32 tx_stat_gtfrg_hi;
994 u32 tx_stat_gtovr_lo;
995 u32 tx_stat_gtovr_hi;
996 u32 tx_stat_gt64_lo;
997 u32 tx_stat_gt64_hi;
998 u32 tx_stat_gt127_lo;
999 u32 tx_stat_gt127_hi;
1000 u32 tx_stat_gt255_lo;
1001 u32 tx_stat_gt255_hi;
1002 u32 tx_stat_gt511_lo;
1003 u32 tx_stat_gt511_hi;
1004 u32 tx_stat_gt1023_lo;
1005 u32 tx_stat_gt1023_hi;
1006 u32 tx_stat_gt1518_lo;
1007 u32 tx_stat_gt1518_hi;
1008 u32 tx_stat_gt2047_lo;
1009 u32 tx_stat_gt2047_hi;
1010 u32 tx_stat_gt4095_lo;
1011 u32 tx_stat_gt4095_hi;
1012 u32 tx_stat_gt9216_lo;
1013 u32 tx_stat_gt9216_hi;
1014 u32 tx_stat_gt16383_lo;
1015 u32 tx_stat_gt16383_hi;
1016 u32 tx_stat_gtmax_lo;
1017 u32 tx_stat_gtmax_hi;
1018 u32 tx_stat_gtufl_lo;
1019 u32 tx_stat_gtufl_hi;
1020 u32 tx_stat_gterr_lo;
1021 u32 tx_stat_gterr_hi;
1022 u32 tx_stat_gtbyt_lo;
1023 u32 tx_stat_gtbyt_hi;
1024
1025 u32 rx_stat_gr64_lo;
1026 u32 rx_stat_gr64_hi;
1027 u32 rx_stat_gr127_lo;
1028 u32 rx_stat_gr127_hi;
1029 u32 rx_stat_gr255_lo;
1030 u32 rx_stat_gr255_hi;
1031 u32 rx_stat_gr511_lo;
1032 u32 rx_stat_gr511_hi;
1033 u32 rx_stat_gr1023_lo;
1034 u32 rx_stat_gr1023_hi;
1035 u32 rx_stat_gr1518_lo;
1036 u32 rx_stat_gr1518_hi;
1037 u32 rx_stat_gr2047_lo;
1038 u32 rx_stat_gr2047_hi;
1039 u32 rx_stat_gr4095_lo;
1040 u32 rx_stat_gr4095_hi;
1041 u32 rx_stat_gr9216_lo;
1042 u32 rx_stat_gr9216_hi;
1043 u32 rx_stat_gr16383_lo;
1044 u32 rx_stat_gr16383_hi;
1045 u32 rx_stat_grmax_lo;
1046 u32 rx_stat_grmax_hi;
1047 u32 rx_stat_grpkt_lo;
1048 u32 rx_stat_grpkt_hi;
1049 u32 rx_stat_grfcs_lo;
1050 u32 rx_stat_grfcs_hi;
1051 u32 rx_stat_grmca_lo;
1052 u32 rx_stat_grmca_hi;
1053 u32 rx_stat_grbca_lo;
1054 u32 rx_stat_grbca_hi;
1055 u32 rx_stat_grxcf_lo;
1056 u32 rx_stat_grxcf_hi;
1057 u32 rx_stat_grxpf_lo;
1058 u32 rx_stat_grxpf_hi;
1059 u32 rx_stat_grxuo_lo;
1060 u32 rx_stat_grxuo_hi;
1061 u32 rx_stat_grjbr_lo;
1062 u32 rx_stat_grjbr_hi;
1063 u32 rx_stat_grovr_lo;
1064 u32 rx_stat_grovr_hi;
1065 u32 rx_stat_grflr_lo;
1066 u32 rx_stat_grflr_hi;
1067 u32 rx_stat_grmeg_lo;
1068 u32 rx_stat_grmeg_hi;
1069 u32 rx_stat_grmeb_lo;
1070 u32 rx_stat_grmeb_hi;
1071 u32 rx_stat_grbyt_lo;
1072 u32 rx_stat_grbyt_hi;
1073 u32 rx_stat_grund_lo;
1074 u32 rx_stat_grund_hi;
1075 u32 rx_stat_grfrg_lo;
1076 u32 rx_stat_grfrg_hi;
1077 u32 rx_stat_grerb_lo;
1078 u32 rx_stat_grerb_hi;
1079 u32 rx_stat_grfre_lo;
1080 u32 rx_stat_grfre_hi;
1081 u32 rx_stat_gripj_lo;
1082 u32 rx_stat_gripj_hi;
1083};
1084
1085
1086union mac_stats {
1087 struct emac_stats emac_stats;
1088 struct bmac_stats bmac_stats;
1089};
1090
1091
1092struct mac_stx {
1093 /* in_bad_octets */
1094 u32 rx_stat_ifhcinbadoctets_hi;
1095 u32 rx_stat_ifhcinbadoctets_lo;
1096
1097 /* out_bad_octets */
1098 u32 tx_stat_ifhcoutbadoctets_hi;
1099 u32 tx_stat_ifhcoutbadoctets_lo;
1100
1101 /* crc_receive_errors */
1102 u32 rx_stat_dot3statsfcserrors_hi;
1103 u32 rx_stat_dot3statsfcserrors_lo;
1104 /* alignment_errors */
1105 u32 rx_stat_dot3statsalignmenterrors_hi;
1106 u32 rx_stat_dot3statsalignmenterrors_lo;
1107 /* carrier_sense_errors */
1108 u32 rx_stat_dot3statscarriersenseerrors_hi;
1109 u32 rx_stat_dot3statscarriersenseerrors_lo;
1110 /* false_carrier_detections */
1111 u32 rx_stat_falsecarriererrors_hi;
1112 u32 rx_stat_falsecarriererrors_lo;
1113
1114 /* runt_packets_received */
1115 u32 rx_stat_etherstatsundersizepkts_hi;
1116 u32 rx_stat_etherstatsundersizepkts_lo;
1117 /* jabber_packets_received */
1118 u32 rx_stat_dot3statsframestoolong_hi;
1119 u32 rx_stat_dot3statsframestoolong_lo;
1120
1121 /* error_runt_packets_received */
1122 u32 rx_stat_etherstatsfragments_hi;
1123 u32 rx_stat_etherstatsfragments_lo;
1124 /* error_jabber_packets_received */
1125 u32 rx_stat_etherstatsjabbers_hi;
1126 u32 rx_stat_etherstatsjabbers_lo;
1127
1128 /* control_frames_received */
1129 u32 rx_stat_maccontrolframesreceived_hi;
1130 u32 rx_stat_maccontrolframesreceived_lo;
1131 u32 rx_stat_bmac_xpf_hi;
1132 u32 rx_stat_bmac_xpf_lo;
1133 u32 rx_stat_bmac_xcf_hi;
1134 u32 rx_stat_bmac_xcf_lo;
1135
1136 /* xoff_state_entered */
1137 u32 rx_stat_xoffstateentered_hi;
1138 u32 rx_stat_xoffstateentered_lo;
1139 /* pause_xon_frames_received */
1140 u32 rx_stat_xonpauseframesreceived_hi;
1141 u32 rx_stat_xonpauseframesreceived_lo;
1142 /* pause_xoff_frames_received */
1143 u32 rx_stat_xoffpauseframesreceived_hi;
1144 u32 rx_stat_xoffpauseframesreceived_lo;
1145 /* pause_xon_frames_transmitted */
1146 u32 tx_stat_outxonsent_hi;
1147 u32 tx_stat_outxonsent_lo;
1148 /* pause_xoff_frames_transmitted */
1149 u32 tx_stat_outxoffsent_hi;
1150 u32 tx_stat_outxoffsent_lo;
1151 /* flow_control_done */
1152 u32 tx_stat_flowcontroldone_hi;
1153 u32 tx_stat_flowcontroldone_lo;
1154
1155 /* ether_stats_collisions */
1156 u32 tx_stat_etherstatscollisions_hi;
1157 u32 tx_stat_etherstatscollisions_lo;
1158 /* single_collision_transmit_frames */
1159 u32 tx_stat_dot3statssinglecollisionframes_hi;
1160 u32 tx_stat_dot3statssinglecollisionframes_lo;
1161 /* multiple_collision_transmit_frames */
1162 u32 tx_stat_dot3statsmultiplecollisionframes_hi;
1163 u32 tx_stat_dot3statsmultiplecollisionframes_lo;
1164 /* deferred_transmissions */
1165 u32 tx_stat_dot3statsdeferredtransmissions_hi;
1166 u32 tx_stat_dot3statsdeferredtransmissions_lo;
1167 /* excessive_collision_frames */
1168 u32 tx_stat_dot3statsexcessivecollisions_hi;
1169 u32 tx_stat_dot3statsexcessivecollisions_lo;
1170 /* late_collision_frames */
1171 u32 tx_stat_dot3statslatecollisions_hi;
1172 u32 tx_stat_dot3statslatecollisions_lo;
1173
1174 /* frames_transmitted_64_bytes */
1175 u32 tx_stat_etherstatspkts64octets_hi;
1176 u32 tx_stat_etherstatspkts64octets_lo;
1177 /* frames_transmitted_65_127_bytes */
1178 u32 tx_stat_etherstatspkts65octetsto127octets_hi;
1179 u32 tx_stat_etherstatspkts65octetsto127octets_lo;
1180 /* frames_transmitted_128_255_bytes */
1181 u32 tx_stat_etherstatspkts128octetsto255octets_hi;
1182 u32 tx_stat_etherstatspkts128octetsto255octets_lo;
1183 /* frames_transmitted_256_511_bytes */
1184 u32 tx_stat_etherstatspkts256octetsto511octets_hi;
1185 u32 tx_stat_etherstatspkts256octetsto511octets_lo;
1186 /* frames_transmitted_512_1023_bytes */
1187 u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
1188 u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
1189 /* frames_transmitted_1024_1522_bytes */
1190 u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
1191 u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
1192 /* frames_transmitted_1523_9022_bytes */
1193 u32 tx_stat_etherstatspktsover1522octets_hi;
1194 u32 tx_stat_etherstatspktsover1522octets_lo;
1195 u32 tx_stat_bmac_2047_hi;
1196 u32 tx_stat_bmac_2047_lo;
1197 u32 tx_stat_bmac_4095_hi;
1198 u32 tx_stat_bmac_4095_lo;
1199 u32 tx_stat_bmac_9216_hi;
1200 u32 tx_stat_bmac_9216_lo;
1201 u32 tx_stat_bmac_16383_hi;
1202 u32 tx_stat_bmac_16383_lo;
1203
1204 /* internal_mac_transmit_errors */
1205 u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
1206 u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
1207
1208 /* if_out_discards */
1209 u32 tx_stat_bmac_ufl_hi;
1210 u32 tx_stat_bmac_ufl_lo;
1211};
1212
1213
1214#define MAC_STX_IDX_MAX 2
1215
1216struct host_port_stats {
1217 u32 host_port_stats_start;
1218
1219 struct mac_stx mac_stx[MAC_STX_IDX_MAX];
1220
1221 u32 brb_drop_hi;
1222 u32 brb_drop_lo;
1223
1224 u32 host_port_stats_end;
1225};
1226
1227
1228struct host_func_stats {
1229 u32 host_func_stats_start;
1230
1231 u32 total_bytes_received_hi;
1232 u32 total_bytes_received_lo;
1233
1234 u32 total_bytes_transmitted_hi;
1235 u32 total_bytes_transmitted_lo;
1236
1237 u32 total_unicast_packets_received_hi;
1238 u32 total_unicast_packets_received_lo;
1239
1240 u32 total_multicast_packets_received_hi;
1241 u32 total_multicast_packets_received_lo;
1242
1243 u32 total_broadcast_packets_received_hi;
1244 u32 total_broadcast_packets_received_lo;
1245
1246 u32 total_unicast_packets_transmitted_hi;
1247 u32 total_unicast_packets_transmitted_lo;
1248
1249 u32 total_multicast_packets_transmitted_hi;
1250 u32 total_multicast_packets_transmitted_lo;
1251
1252 u32 total_broadcast_packets_transmitted_hi;
1253 u32 total_broadcast_packets_transmitted_lo;
1254
1255 u32 valid_bytes_received_hi;
1256 u32 valid_bytes_received_lo;
1257
1258 u32 host_func_stats_end;
1259};
1260
1261
1262#define BCM_5710_FW_MAJOR_VERSION 5
1263#define BCM_5710_FW_MINOR_VERSION 2
1264#define BCM_5710_FW_REVISION_VERSION 13
1265#define BCM_5710_FW_ENGINEERING_VERSION 0
1266#define BCM_5710_FW_COMPILE_FLAGS 1
1267
1268
1269/*
1270 * attention bits
1271 */
1272struct atten_def_status_block {
1273 __le32 attn_bits;
1274 __le32 attn_bits_ack;
1275 u8 status_block_id;
1276 u8 reserved0;
1277 __le16 attn_bits_index;
1278 __le32 reserved1;
1279};
1280
1281
1282/*
1283 * common data for all protocols
1284 */
1285struct doorbell_hdr {
1286 u8 header;
1287#define DOORBELL_HDR_RX (0x1<<0)
1288#define DOORBELL_HDR_RX_SHIFT 0
1289#define DOORBELL_HDR_DB_TYPE (0x1<<1)
1290#define DOORBELL_HDR_DB_TYPE_SHIFT 1
1291#define DOORBELL_HDR_DPM_SIZE (0x3<<2)
1292#define DOORBELL_HDR_DPM_SIZE_SHIFT 2
1293#define DOORBELL_HDR_CONN_TYPE (0xF<<4)
1294#define DOORBELL_HDR_CONN_TYPE_SHIFT 4
1295};
1296
1297/*
1298 * doorbell message sent to the chip
1299 */
1300struct doorbell {
1301#if defined(__BIG_ENDIAN)
1302 u16 zero_fill2;
1303 u8 zero_fill1;
1304 struct doorbell_hdr header;
1305#elif defined(__LITTLE_ENDIAN)
1306 struct doorbell_hdr header;
1307 u8 zero_fill1;
1308 u16 zero_fill2;
1309#endif
1310};
1311
1312
1313/*
1314 * doorbell message sent to the chip
1315 */
1316struct doorbell_set_prod {
1317#if defined(__BIG_ENDIAN)
1318 u16 prod;
1319 u8 zero_fill1;
1320 struct doorbell_hdr header;
1321#elif defined(__LITTLE_ENDIAN)
1322 struct doorbell_hdr header;
1323 u8 zero_fill1;
1324 u16 prod;
1325#endif
1326};
1327
1328
1329/*
1330 * IGU driver acknowledgement register
1331 */
1332struct igu_ack_register {
1333#if defined(__BIG_ENDIAN)
1334 u16 sb_id_and_flags;
1335#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
1336#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
1337#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
1338#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
1339#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
1340#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
1341#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
1342#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
1343#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
1344#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
1345 u16 status_block_index;
1346#elif defined(__LITTLE_ENDIAN)
1347 u16 status_block_index;
1348 u16 sb_id_and_flags;
1349#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
1350#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
1351#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
1352#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
1353#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
1354#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
1355#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
1356#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
1357#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
1358#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
1359#endif
1360};
1361
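The FIELD/FIELD_SHIFT define pairs used throughout this header are combined the same way everywhere: shift the value into place, then mask it into the containing word. A minimal sketch of packing sb_id_and_flags with the defines above (the helper name and its parameters are illustrative, not taken from the driver):

static inline u16 igu_ack_pack(u8 sb_id, u8 storm_id, u8 update, u8 int_mode)
{
	u16 v = 0;

	/* each field: (value << FIELD_SHIFT) masked by FIELD */
	v |= (sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) &
	     IGU_ACK_REGISTER_STATUS_BLOCK_ID;
	v |= (storm_id << IGU_ACK_REGISTER_STORM_ID_SHIFT) &
	     IGU_ACK_REGISTER_STORM_ID;
	v |= (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) &
	     IGU_ACK_REGISTER_UPDATE_INDEX;
	v |= (int_mode << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT) &
	     IGU_ACK_REGISTER_INTERRUPT_MODE;
	return v;
}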
1362
1363/*
1364 * IGU driver acknowledgement register
1365 */
1366struct igu_backward_compatible {
1367 u32 sb_id_and_flags;
1368#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0)
1369#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0
1370#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16)
1371#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16
1372#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21)
1373#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21
1374#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24)
1375#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24
1376#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25)
1377#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25
1378#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27)
1379#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27
1380 u32 reserved_2;
1381};
1382
1383
1384/*
1385 * IGU driver acknowledgement register
1386 */
1387struct igu_regular {
1388 u32 sb_id_and_flags;
1389#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0)
1390#define IGU_REGULAR_SB_INDEX_SHIFT 0
1391#define IGU_REGULAR_RESERVED0 (0x1<<20)
1392#define IGU_REGULAR_RESERVED0_SHIFT 20
1393#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21)
1394#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21
1395#define IGU_REGULAR_BUPDATE (0x1<<24)
1396#define IGU_REGULAR_BUPDATE_SHIFT 24
1397#define IGU_REGULAR_ENABLE_INT (0x3<<25)
1398#define IGU_REGULAR_ENABLE_INT_SHIFT 25
1399#define IGU_REGULAR_RESERVED_1 (0x1<<27)
1400#define IGU_REGULAR_RESERVED_1_SHIFT 27
1401#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28)
1402#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28
1403#define IGU_REGULAR_CLEANUP_SET (0x1<<30)
1404#define IGU_REGULAR_CLEANUP_SET_SHIFT 30
1405#define IGU_REGULAR_BCLEANUP (0x1<<31)
1406#define IGU_REGULAR_BCLEANUP_SHIFT 31
1407 u32 reserved_2;
1408};
1409
1410/*
1411 * IGU driver acknowledgement register
1412 */
1413union igu_consprod_reg {
1414 struct igu_regular regular;
1415 struct igu_backward_compatible backward_compatible;
1416};
1417
1418
1419/*
1420 * Parser parsing flags field
1421 */
1422struct parsing_flags {
1423 __le16 flags;
1424#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0)
1425#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0
1426#define PARSING_FLAGS_VLAN (0x1<<1)
1427#define PARSING_FLAGS_VLAN_SHIFT 1
1428#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2)
1429#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2
1430#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3)
1431#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3
1432#define PARSING_FLAGS_IP_OPTIONS (0x1<<5)
1433#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5
1434#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1<<6)
1435#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6
1436#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3<<7)
1437#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7
1438#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1<<9)
1439#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9
1440#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1<<10)
1441#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10
1442#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1<<11)
1443#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11
1444#define PARSING_FLAGS_CONNECTION_MATCH (0x1<<12)
1445#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12
1446#define PARSING_FLAGS_LLC_SNAP (0x1<<13)
1447#define PARSING_FLAGS_LLC_SNAP_SHIFT 13
1448#define PARSING_FLAGS_RESERVED0 (0x3<<14)
1449#define PARSING_FLAGS_RESERVED0_SHIFT 14
1450};
1451
1452
1453struct regpair {
1454 __le32 lo;
1455 __le32 hi;
1456};
1457
1458
1459/*
1460 * dmae command structure
1461 */
1462struct dmae_command {
1463 u32 opcode;
1464#define DMAE_COMMAND_SRC (0x1<<0)
1465#define DMAE_COMMAND_SRC_SHIFT 0
1466#define DMAE_COMMAND_DST (0x3<<1)
1467#define DMAE_COMMAND_DST_SHIFT 1
1468#define DMAE_COMMAND_C_DST (0x1<<3)
1469#define DMAE_COMMAND_C_DST_SHIFT 3
1470#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4)
1471#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4
1472#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5)
1473#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5
1474#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6)
1475#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6
1476#define DMAE_COMMAND_ENDIANITY (0x3<<9)
1477#define DMAE_COMMAND_ENDIANITY_SHIFT 9
1478#define DMAE_COMMAND_PORT (0x1<<11)
1479#define DMAE_COMMAND_PORT_SHIFT 11
1480#define DMAE_COMMAND_CRC_RESET (0x1<<12)
1481#define DMAE_COMMAND_CRC_RESET_SHIFT 12
1482#define DMAE_COMMAND_SRC_RESET (0x1<<13)
1483#define DMAE_COMMAND_SRC_RESET_SHIFT 13
1484#define DMAE_COMMAND_DST_RESET (0x1<<14)
1485#define DMAE_COMMAND_DST_RESET_SHIFT 14
1486#define DMAE_COMMAND_E1HVN (0x3<<15)
1487#define DMAE_COMMAND_E1HVN_SHIFT 15
1488#define DMAE_COMMAND_RESERVED0 (0x7FFF<<17)
1489#define DMAE_COMMAND_RESERVED0_SHIFT 17
1490 u32 src_addr_lo;
1491 u32 src_addr_hi;
1492 u32 dst_addr_lo;
1493 u32 dst_addr_hi;
1494#if defined(__BIG_ENDIAN)
1495 u16 reserved1;
1496 u16 len;
1497#elif defined(__LITTLE_ENDIAN)
1498 u16 len;
1499 u16 reserved1;
1500#endif
1501 u32 comp_addr_lo;
1502 u32 comp_addr_hi;
1503 u32 comp_val;
1504 u32 crc32;
1505 u32 crc32_c;
1506#if defined(__BIG_ENDIAN)
1507 u16 crc16_c;
1508 u16 crc16;
1509#elif defined(__LITTLE_ENDIAN)
1510 u16 crc16;
1511 u16 crc16_c;
1512#endif
1513#if defined(__BIG_ENDIAN)
1514 u16 reserved2;
1515 u16 crc_t10;
1516#elif defined(__LITTLE_ENDIAN)
1517 u16 crc_t10;
1518 u16 reserved2;
1519#endif
1520#if defined(__BIG_ENDIAN)
1521 u16 xsum8;
1522 u16 xsum16;
1523#elif defined(__LITTLE_ENDIAN)
1524 u16 xsum16;
1525 u16 xsum8;
1526#endif
1527};
1528
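Every host address in the command is carried as a lo/hi pair of 32-bit words. A sketch of splitting the 64-bit source, destination and completion addresses before the command is posted (the helper is illustrative; the driver has its own wrappers around this):

static inline void dmae_set_addrs(struct dmae_command *cmd,
				  u64 src, u64 dst, u64 comp)
{
	/* split each 64-bit host address into its lo/hi u32 fields */
	cmd->src_addr_lo  = (u32)(src & 0xffffffff);
	cmd->src_addr_hi  = (u32)(src >> 32);
	cmd->dst_addr_lo  = (u32)(dst & 0xffffffff);
	cmd->dst_addr_hi  = (u32)(dst >> 32);
	cmd->comp_addr_lo = (u32)(comp & 0xffffffff);
	cmd->comp_addr_hi = (u32)(comp >> 32);
}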
1529
1530struct double_regpair {
1531 u32 regpair0_lo;
1532 u32 regpair0_hi;
1533 u32 regpair1_lo;
1534 u32 regpair1_hi;
1535};
1536
1537
1538/*
1539 * The eth storm context of Ustorm (configuration part)
1540 */
1541struct ustorm_eth_st_context_config {
1542#if defined(__BIG_ENDIAN)
1543 u8 flags;
1544#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0)
1545#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0
1546#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1)
1547#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1
1548#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2)
1549#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2
1550#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3)
1551#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3
1552#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4)
1553#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
1554 u8 status_block_id;
1555 u8 clientId;
1556 u8 sb_index_numbers;
1557#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
1558#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
1559#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
1560#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
1561#elif defined(__LITTLE_ENDIAN)
1562 u8 sb_index_numbers;
1563#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
1564#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
1565#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
1566#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
1567 u8 clientId;
1568 u8 status_block_id;
1569 u8 flags;
1570#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0)
1571#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0
1572#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1)
1573#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1
1574#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2)
1575#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2
1576#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3)
1577#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3
1578#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4)
1579#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
1580#endif
1581#if defined(__BIG_ENDIAN)
1582 u16 bd_buff_size;
1583 u8 statistics_counter_id;
1584 u8 mc_alignment_log_size;
1585#elif defined(__LITTLE_ENDIAN)
1586 u8 mc_alignment_log_size;
1587 u8 statistics_counter_id;
1588 u16 bd_buff_size;
1589#endif
1590#if defined(__BIG_ENDIAN)
1591 u8 __local_sge_prod;
1592 u8 __local_bd_prod;
1593 u16 sge_buff_size;
1594#elif defined(__LITTLE_ENDIAN)
1595 u16 sge_buff_size;
1596 u8 __local_bd_prod;
1597 u8 __local_sge_prod;
1598#endif
1599#if defined(__BIG_ENDIAN)
1600 u16 __sdm_bd_expected_counter;
1601 u8 cstorm_agg_int;
1602 u8 __expected_bds_on_ram;
1603#elif defined(__LITTLE_ENDIAN)
1604 u8 __expected_bds_on_ram;
1605 u8 cstorm_agg_int;
1606 u16 __sdm_bd_expected_counter;
1607#endif
1608#if defined(__BIG_ENDIAN)
1609 u16 __ring_data_ram_addr;
1610 u16 __hc_cstorm_ram_addr;
1611#elif defined(__LITTLE_ENDIAN)
1612 u16 __hc_cstorm_ram_addr;
1613 u16 __ring_data_ram_addr;
1614#endif
1615#if defined(__BIG_ENDIAN)
1616 u8 reserved1;
1617 u8 max_sges_for_packet;
1618 u16 __bd_ring_ram_addr;
1619#elif defined(__LITTLE_ENDIAN)
1620 u16 __bd_ring_ram_addr;
1621 u8 max_sges_for_packet;
1622 u8 reserved1;
1623#endif
1624 u32 bd_page_base_lo;
1625 u32 bd_page_base_hi;
1626 u32 sge_page_base_lo;
1627 u32 sge_page_base_hi;
1628 struct regpair reserved2;
1629};
1630
1631/*
1632 * The eth Rx Buffer Descriptor
1633 */
1634struct eth_rx_bd {
1635 __le32 addr_lo;
1636 __le32 addr_hi;
1637};
1638
1639/*
1640 * The eth Rx SGE Descriptor
1641 */
1642struct eth_rx_sge {
1643 __le32 addr_lo;
1644 __le32 addr_hi;
1645};
1646
1647/*
1648 * Local BDs and SGEs rings (in ETH)
1649 */
1650struct eth_local_rx_rings {
1651 struct eth_rx_bd __local_bd_ring[8];
1652 struct eth_rx_sge __local_sge_ring[10];
1653};
1654
1655/*
1656 * The eth storm context of Ustorm
1657 */
1658struct ustorm_eth_st_context {
1659 struct ustorm_eth_st_context_config common;
1660 struct eth_local_rx_rings __rings;
1661};
1662
1663/*
1664 * The eth storm context of Tstorm
1665 */
1666struct tstorm_eth_st_context {
1667 u32 __reserved0[28];
1668};
1669
1670/*
1671 * The eth aggregative context section of Xstorm
1672 */
1673struct xstorm_eth_extra_ag_context_section {
1674#if defined(__BIG_ENDIAN)
1675 u8 __tcp_agg_vars1;
1676 u8 __reserved50;
1677 u16 __mss;
1678#elif defined(__LITTLE_ENDIAN)
1679 u16 __mss;
1680 u8 __reserved50;
1681 u8 __tcp_agg_vars1;
1682#endif
1683 u32 __snd_nxt;
1684 u32 __tx_wnd;
1685 u32 __snd_una;
1686 u32 __reserved53;
1687#if defined(__BIG_ENDIAN)
1688 u8 __agg_val8_th;
1689 u8 __agg_val8;
1690 u16 __tcp_agg_vars2;
1691#elif defined(__LITTLE_ENDIAN)
1692 u16 __tcp_agg_vars2;
1693 u8 __agg_val8;
1694 u8 __agg_val8_th;
1695#endif
1696 u32 __reserved58;
1697 u32 __reserved59;
1698 u32 __reserved60;
1699 u32 __reserved61;
1700#if defined(__BIG_ENDIAN)
1701 u16 __agg_val7_th;
1702 u16 __agg_val7;
1703#elif defined(__LITTLE_ENDIAN)
1704 u16 __agg_val7;
1705 u16 __agg_val7_th;
1706#endif
1707#if defined(__BIG_ENDIAN)
1708 u8 __tcp_agg_vars5;
1709 u8 __tcp_agg_vars4;
1710 u8 __tcp_agg_vars3;
1711 u8 __reserved62;
1712#elif defined(__LITTLE_ENDIAN)
1713 u8 __reserved62;
1714 u8 __tcp_agg_vars3;
1715 u8 __tcp_agg_vars4;
1716 u8 __tcp_agg_vars5;
1717#endif
1718 u32 __tcp_agg_vars6;
1719#if defined(__BIG_ENDIAN)
1720 u16 __agg_misc6;
1721 u16 __tcp_agg_vars7;
1722#elif defined(__LITTLE_ENDIAN)
1723 u16 __tcp_agg_vars7;
1724 u16 __agg_misc6;
1725#endif
1726 u32 __agg_val10;
1727 u32 __agg_val10_th;
1728#if defined(__BIG_ENDIAN)
1729 u16 __reserved3;
1730 u8 __reserved2;
1731 u8 __da_only_cnt;
1732#elif defined(__LITTLE_ENDIAN)
1733 u8 __da_only_cnt;
1734 u8 __reserved2;
1735 u16 __reserved3;
1736#endif
1737};
1738
1739/*
1740 * The eth aggregative context of Xstorm
1741 */
1742struct xstorm_eth_ag_context {
1743#if defined(__BIG_ENDIAN)
1744 u16 agg_val1;
1745 u8 __agg_vars1;
1746 u8 __state;
1747#elif defined(__LITTLE_ENDIAN)
1748 u8 __state;
1749 u8 __agg_vars1;
1750 u16 agg_val1;
1751#endif
1752#if defined(__BIG_ENDIAN)
1753 u8 cdu_reserved;
1754 u8 __agg_vars4;
1755 u8 __agg_vars3;
1756 u8 __agg_vars2;
1757#elif defined(__LITTLE_ENDIAN)
1758 u8 __agg_vars2;
1759 u8 __agg_vars3;
1760 u8 __agg_vars4;
1761 u8 cdu_reserved;
1762#endif
1763 u32 __bd_prod;
1764#if defined(__BIG_ENDIAN)
1765 u16 __agg_vars5;
1766 u16 __agg_val4_th;
1767#elif defined(__LITTLE_ENDIAN)
1768 u16 __agg_val4_th;
1769 u16 __agg_vars5;
1770#endif
1771 struct xstorm_eth_extra_ag_context_section __extra_section;
1772#if defined(__BIG_ENDIAN)
1773 u16 __agg_vars7;
1774 u8 __agg_val3_th;
1775 u8 __agg_vars6;
1776#elif defined(__LITTLE_ENDIAN)
1777 u8 __agg_vars6;
1778 u8 __agg_val3_th;
1779 u16 __agg_vars7;
1780#endif
1781#if defined(__BIG_ENDIAN)
1782 u16 __agg_val11_th;
1783 u16 __agg_val11;
1784#elif defined(__LITTLE_ENDIAN)
1785 u16 __agg_val11;
1786 u16 __agg_val11_th;
1787#endif
1788#if defined(__BIG_ENDIAN)
1789 u8 __reserved1;
1790 u8 __agg_val6_th;
1791 u16 __agg_val9;
1792#elif defined(__LITTLE_ENDIAN)
1793 u16 __agg_val9;
1794 u8 __agg_val6_th;
1795 u8 __reserved1;
1796#endif
1797#if defined(__BIG_ENDIAN)
1798 u16 __agg_val2_th;
1799 u16 __agg_val2;
1800#elif defined(__LITTLE_ENDIAN)
1801 u16 __agg_val2;
1802 u16 __agg_val2_th;
1803#endif
1804 u32 __agg_vars8;
1805#if defined(__BIG_ENDIAN)
1806 u16 __agg_misc0;
1807 u16 __agg_val4;
1808#elif defined(__LITTLE_ENDIAN)
1809 u16 __agg_val4;
1810 u16 __agg_misc0;
1811#endif
1812#if defined(__BIG_ENDIAN)
1813 u8 __agg_val3;
1814 u8 __agg_val6;
1815 u8 __agg_val5_th;
1816 u8 __agg_val5;
1817#elif defined(__LITTLE_ENDIAN)
1818 u8 __agg_val5;
1819 u8 __agg_val5_th;
1820 u8 __agg_val6;
1821 u8 __agg_val3;
1822#endif
1823#if defined(__BIG_ENDIAN)
1824 u16 __agg_misc1;
1825 u16 __bd_ind_max_val;
1826#elif defined(__LITTLE_ENDIAN)
1827 u16 __bd_ind_max_val;
1828 u16 __agg_misc1;
1829#endif
1830 u32 __reserved57;
1831 u32 __agg_misc4;
1832 u32 __agg_misc5;
1833};
1834
1835/*
1836 * The eth extra aggregative context section of Tstorm
1837 */
1838struct tstorm_eth_extra_ag_context_section {
1839 u32 __agg_val1;
1840#if defined(__BIG_ENDIAN)
1841 u8 __tcp_agg_vars2;
1842 u8 __agg_val3;
1843 u16 __agg_val2;
1844#elif defined(__LITTLE_ENDIAN)
1845 u16 __agg_val2;
1846 u8 __agg_val3;
1847 u8 __tcp_agg_vars2;
1848#endif
1849#if defined(__BIG_ENDIAN)
1850 u16 __agg_val5;
1851 u8 __agg_val6;
1852 u8 __tcp_agg_vars3;
1853#elif defined(__LITTLE_ENDIAN)
1854 u8 __tcp_agg_vars3;
1855 u8 __agg_val6;
1856 u16 __agg_val5;
1857#endif
1858 u32 __reserved63;
1859 u32 __reserved64;
1860 u32 __reserved65;
1861 u32 __reserved66;
1862 u32 __reserved67;
1863 u32 __tcp_agg_vars1;
1864 u32 __reserved61;
1865 u32 __reserved62;
1866 u32 __reserved2;
1867};
1868
1869/*
1870 * The eth aggregative context of Tstorm
1871 */
1872struct tstorm_eth_ag_context {
1873#if defined(__BIG_ENDIAN)
1874 u16 __reserved54;
1875 u8 __agg_vars1;
1876 u8 __state;
1877#elif defined(__LITTLE_ENDIAN)
1878 u8 __state;
1879 u8 __agg_vars1;
1880 u16 __reserved54;
1881#endif
1882#if defined(__BIG_ENDIAN)
1883 u16 __agg_val4;
1884 u16 __agg_vars2;
1885#elif defined(__LITTLE_ENDIAN)
1886 u16 __agg_vars2;
1887 u16 __agg_val4;
1888#endif
1889 struct tstorm_eth_extra_ag_context_section __extra_section;
1890};
1891
1892/*
1893 * The eth aggregative context of Cstorm
1894 */
1895struct cstorm_eth_ag_context {
1896 u32 __agg_vars1;
1897#if defined(__BIG_ENDIAN)
1898 u8 __aux1_th;
1899 u8 __aux1_val;
1900 u16 __agg_vars2;
1901#elif defined(__LITTLE_ENDIAN)
1902 u16 __agg_vars2;
1903 u8 __aux1_val;
1904 u8 __aux1_th;
1905#endif
1906 u32 __num_of_treated_packet;
1907 u32 __last_packet_treated;
1908#if defined(__BIG_ENDIAN)
1909 u16 __reserved58;
1910 u16 __reserved57;
1911#elif defined(__LITTLE_ENDIAN)
1912 u16 __reserved57;
1913 u16 __reserved58;
1914#endif
1915#if defined(__BIG_ENDIAN)
1916 u8 __reserved62;
1917 u8 __reserved61;
1918 u8 __reserved60;
1919 u8 __reserved59;
1920#elif defined(__LITTLE_ENDIAN)
1921 u8 __reserved59;
1922 u8 __reserved60;
1923 u8 __reserved61;
1924 u8 __reserved62;
1925#endif
1926#if defined(__BIG_ENDIAN)
1927 u16 __reserved64;
1928 u16 __reserved63;
1929#elif defined(__LITTLE_ENDIAN)
1930 u16 __reserved63;
1931 u16 __reserved64;
1932#endif
1933 u32 __reserved65;
1934#if defined(__BIG_ENDIAN)
1935 u16 __agg_vars3;
1936 u16 __rq_inv_cnt;
1937#elif defined(__LITTLE_ENDIAN)
1938 u16 __rq_inv_cnt;
1939 u16 __agg_vars3;
1940#endif
1941#if defined(__BIG_ENDIAN)
1942 u16 __packet_index_th;
1943 u16 __packet_index;
1944#elif defined(__LITTLE_ENDIAN)
1945 u16 __packet_index;
1946 u16 __packet_index_th;
1947#endif
1948};
1949
1950/*
1951 * The eth aggregative context of Ustorm
1952 */
1953struct ustorm_eth_ag_context {
1954#if defined(__BIG_ENDIAN)
1955 u8 __aux_counter_flags;
1956 u8 __agg_vars2;
1957 u8 __agg_vars1;
1958 u8 __state;
1959#elif defined(__LITTLE_ENDIAN)
1960 u8 __state;
1961 u8 __agg_vars1;
1962 u8 __agg_vars2;
1963 u8 __aux_counter_flags;
1964#endif
1965#if defined(__BIG_ENDIAN)
1966 u8 cdu_usage;
1967 u8 __agg_misc2;
1968 u16 __agg_misc1;
1969#elif defined(__LITTLE_ENDIAN)
1970 u16 __agg_misc1;
1971 u8 __agg_misc2;
1972 u8 cdu_usage;
1973#endif
1974 u32 __agg_misc4;
1975#if defined(__BIG_ENDIAN)
1976 u8 __agg_val3_th;
1977 u8 __agg_val3;
1978 u16 __agg_misc3;
1979#elif defined(__LITTLE_ENDIAN)
1980 u16 __agg_misc3;
1981 u8 __agg_val3;
1982 u8 __agg_val3_th;
1983#endif
1984 u32 __agg_val1;
1985 u32 __agg_misc4_th;
1986#if defined(__BIG_ENDIAN)
1987 u16 __agg_val2_th;
1988 u16 __agg_val2;
1989#elif defined(__LITTLE_ENDIAN)
1990 u16 __agg_val2;
1991 u16 __agg_val2_th;
1992#endif
1993#if defined(__BIG_ENDIAN)
1994 u16 __reserved2;
1995 u8 __decision_rules;
1996 u8 __decision_rule_enable_bits;
1997#elif defined(__LITTLE_ENDIAN)
1998 u8 __decision_rule_enable_bits;
1999 u8 __decision_rules;
2000 u16 __reserved2;
2001#endif
2002};
2003
2004/*
2005 * Timers connection context
2006 */
2007struct timers_block_context {
2008 u32 __reserved_0;
2009 u32 __reserved_1;
2010 u32 __reserved_2;
2011 u32 flags;
2012#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
2013#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
2014#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
2015#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
2016#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
2017#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
2018};
2019
2020/*
2021 * structure for easy accessibility to assembler
2022 */
2023struct eth_tx_bd_flags {
2024 u8 as_bitfield;
2025#define ETH_TX_BD_FLAGS_VLAN_TAG (0x1<<0)
2026#define ETH_TX_BD_FLAGS_VLAN_TAG_SHIFT 0
2027#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<1)
2028#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 1
2029#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<2)
2030#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 2
2031#define ETH_TX_BD_FLAGS_END_BD (0x1<<3)
2032#define ETH_TX_BD_FLAGS_END_BD_SHIFT 3
2033#define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
2034#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
2035#define ETH_TX_BD_FLAGS_HDR_POOL (0x1<<5)
2036#define ETH_TX_BD_FLAGS_HDR_POOL_SHIFT 5
2037#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6)
2038#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
2039#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7)
2040#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7
2041};
2042
2043/*
2044 * The eth Tx Buffer Descriptor
2045 */
2046struct eth_tx_start_bd {
2047 __le32 addr_lo;
2048 __le32 addr_hi;
2049 __le16 nbd;
2050 __le16 nbytes;
2051 __le16 vlan;
2052 struct eth_tx_bd_flags bd_flags;
2053 u8 general_data;
2054#define ETH_TX_START_BD_HDR_NBDS (0x3F<<0)
2055#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
2056#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6)
2057#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6
2058};
2059
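A sketch of filling a start BD for a frame held in a single mapped buffer; the helper and the chosen values (one header BD, no VLAN tag) are illustrative, the real transmit path does this alongside its ring bookkeeping:

static inline void fill_tx_start_bd(struct eth_tx_start_bd *bd,
				    u64 mapping, u16 len, u8 nbds)
{
	bd->addr_lo = cpu_to_le32((u32)(mapping & 0xffffffff));
	bd->addr_hi = cpu_to_le32((u32)(mapping >> 32));
	bd->nbytes  = cpu_to_le16(len);
	bd->nbd     = cpu_to_le16(nbds);	/* BDs used by this packet */
	bd->vlan    = 0;			/* no VLAN tag in this sketch */
	bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);	/* one header BD */
}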
2060/*
2061 * Tx regular BD structure
2062 */
2063struct eth_tx_bd {
2064 u32 addr_lo;
2065 u32 addr_hi;
2066 u16 total_pkt_bytes;
2067 u16 nbytes;
2068 u8 reserved[4];
2069};
2070
2071/*
2072 * Tx parsing BD structure for ETH, relevant in START
2073 */
2074struct eth_tx_parse_bd {
2075 u8 global_data;
2076#define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET (0xF<<0)
2077#define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET_SHIFT 0
2078#define ETH_TX_PARSE_BD_UDP_CS_FLG (0x1<<4)
2079#define ETH_TX_PARSE_BD_UDP_CS_FLG_SHIFT 4
2080#define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
2081#define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
2082#define ETH_TX_PARSE_BD_LLC_SNAP_EN (0x1<<6)
2083#define ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT 6
2084#define ETH_TX_PARSE_BD_NS_FLG (0x1<<7)
2085#define ETH_TX_PARSE_BD_NS_FLG_SHIFT 7
2086 u8 tcp_flags;
2087#define ETH_TX_PARSE_BD_FIN_FLG (0x1<<0)
2088#define ETH_TX_PARSE_BD_FIN_FLG_SHIFT 0
2089#define ETH_TX_PARSE_BD_SYN_FLG (0x1<<1)
2090#define ETH_TX_PARSE_BD_SYN_FLG_SHIFT 1
2091#define ETH_TX_PARSE_BD_RST_FLG (0x1<<2)
2092#define ETH_TX_PARSE_BD_RST_FLG_SHIFT 2
2093#define ETH_TX_PARSE_BD_PSH_FLG (0x1<<3)
2094#define ETH_TX_PARSE_BD_PSH_FLG_SHIFT 3
2095#define ETH_TX_PARSE_BD_ACK_FLG (0x1<<4)
2096#define ETH_TX_PARSE_BD_ACK_FLG_SHIFT 4
2097#define ETH_TX_PARSE_BD_URG_FLG (0x1<<5)
2098#define ETH_TX_PARSE_BD_URG_FLG_SHIFT 5
2099#define ETH_TX_PARSE_BD_ECE_FLG (0x1<<6)
2100#define ETH_TX_PARSE_BD_ECE_FLG_SHIFT 6
2101#define ETH_TX_PARSE_BD_CWR_FLG (0x1<<7)
2102#define ETH_TX_PARSE_BD_CWR_FLG_SHIFT 7
2103 u8 ip_hlen;
2104 s8 reserved;
2105 __le16 total_hlen;
2106 __le16 tcp_pseudo_csum;
2107 __le16 lso_mss;
2108 __le16 ip_id;
2109 __le32 tcp_send_seq;
2110};
2111
2112/*
2113 * The last BD in the BD memory will hold a pointer to the next BD memory
2114 */
2115struct eth_tx_next_bd {
2116 __le32 addr_lo;
2117 __le32 addr_hi;
2118 u8 reserved[8];
2119};
2120
2121/*
2122 * union for 4 BD types
2123 */
2124union eth_tx_bd_types {
2125 struct eth_tx_start_bd start_bd;
2126 struct eth_tx_bd reg_bd;
2127 struct eth_tx_parse_bd parse_bd;
2128 struct eth_tx_next_bd next_bd;
2129};
2130
2131/*
2132 * The eth storm context of Xstorm
2133 */
2134struct xstorm_eth_st_context {
2135 u32 tx_bd_page_base_lo;
2136 u32 tx_bd_page_base_hi;
2137#if defined(__BIG_ENDIAN)
2138 u16 tx_bd_cons;
2139 u8 statistics_data;
2140#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
2141#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
2142#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
2143#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
2144 u8 __local_tx_bd_prod;
2145#elif defined(__LITTLE_ENDIAN)
2146 u8 __local_tx_bd_prod;
2147 u8 statistics_data;
2148#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
2149#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
2150#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
2151#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
2152 u16 tx_bd_cons;
2153#endif
2154 u32 __reserved1;
2155 u32 __reserved2;
2156#if defined(__BIG_ENDIAN)
2157 u8 __ram_cache_index;
2158 u8 __double_buffer_client;
2159 u16 __pkt_cons;
2160#elif defined(__LITTLE_ENDIAN)
2161 u16 __pkt_cons;
2162 u8 __double_buffer_client;
2163 u8 __ram_cache_index;
2164#endif
2165#if defined(__BIG_ENDIAN)
2166 u16 __statistics_address;
2167 u16 __gso_next;
2168#elif defined(__LITTLE_ENDIAN)
2169 u16 __gso_next;
2170 u16 __statistics_address;
2171#endif
2172#if defined(__BIG_ENDIAN)
2173 u8 __local_tx_bd_cons;
2174 u8 safc_group_num;
2175 u8 safc_group_en;
2176 u8 __is_eth_conn;
2177#elif defined(__LITTLE_ENDIAN)
2178 u8 __is_eth_conn;
2179 u8 safc_group_en;
2180 u8 safc_group_num;
2181 u8 __local_tx_bd_cons;
2182#endif
2183 union eth_tx_bd_types __bds[13];
2184};
2185
2186/*
2187 * The eth storm context of Cstorm
2188 */
2189struct cstorm_eth_st_context {
2190#if defined(__BIG_ENDIAN)
2191 u16 __reserved0;
2192 u8 sb_index_number;
2193 u8 status_block_id;
2194#elif defined(__LITTLE_ENDIAN)
2195 u8 status_block_id;
2196 u8 sb_index_number;
2197 u16 __reserved0;
2198#endif
2199 u32 __reserved1[3];
2200};
2201
2202/*
2203 * Ethernet connection context
2204 */
2205struct eth_context {
2206 struct ustorm_eth_st_context ustorm_st_context;
2207 struct tstorm_eth_st_context tstorm_st_context;
2208 struct xstorm_eth_ag_context xstorm_ag_context;
2209 struct tstorm_eth_ag_context tstorm_ag_context;
2210 struct cstorm_eth_ag_context cstorm_ag_context;
2211 struct ustorm_eth_ag_context ustorm_ag_context;
2212 struct timers_block_context timers_context;
2213 struct xstorm_eth_st_context xstorm_st_context;
2214 struct cstorm_eth_st_context cstorm_st_context;
2215};
2216
2217
2218/*
2219 * Ethernet doorbell
2220 */
2221struct eth_tx_doorbell {
2222#if defined(__BIG_ENDIAN)
2223 u16 npackets;
2224 u8 params;
2225#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
2226#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
2227#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
2228#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
2229#define ETH_TX_DOORBELL_SPARE (0x1<<7)
2230#define ETH_TX_DOORBELL_SPARE_SHIFT 7
2231 struct doorbell_hdr hdr;
2232#elif defined(__LITTLE_ENDIAN)
2233 struct doorbell_hdr hdr;
2234 u8 params;
2235#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
2236#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
2237#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
2238#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
2239#define ETH_TX_DOORBELL_SPARE (0x1<<7)
2240#define ETH_TX_DOORBELL_SPARE_SHIFT 7
2241 u16 npackets;
2242#endif
2243};
2244
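A sketch of the message a driver would compose before writing it through its doorbell window; only the params/npackets layout is shown, the hdr flags and the doorbell write itself are left out (names and values are illustrative):

static inline void fill_tx_doorbell(struct eth_tx_doorbell *db, u8 nbds)
{
	db->params = (nbds << ETH_TX_DOORBELL_NUM_BDS_SHIFT) &
		     ETH_TX_DOORBELL_NUM_BDS;
	db->npackets = 1;	/* one packet posted with this ring */
}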
2245
2246/*
2247 * cstorm default status block, generated by ustorm
2248 */
2249struct cstorm_def_status_block_u {
2250 __le16 index_values[HC_USTORM_DEF_SB_NUM_INDICES];
2251 __le16 status_block_index;
2252 u8 func;
2253 u8 status_block_id;
2254 __le32 __flags;
2255};
2256
2257/*
2258 * cstorm default status block, generated by cstorm
2259 */
2260struct cstorm_def_status_block_c {
2261 __le16 index_values[HC_CSTORM_DEF_SB_NUM_INDICES];
2262 __le16 status_block_index;
2263 u8 func;
2264 u8 status_block_id;
2265 __le32 __flags;
2266};
2267
2268/*
2269 * xstorm status block
2270 */
2271struct xstorm_def_status_block {
2272 __le16 index_values[HC_XSTORM_DEF_SB_NUM_INDICES];
2273 __le16 status_block_index;
2274 u8 func;
2275 u8 status_block_id;
2276 __le32 __flags;
2277};
2278
2279/*
2280 * tstorm status block
2281 */
2282struct tstorm_def_status_block {
2283 __le16 index_values[HC_TSTORM_DEF_SB_NUM_INDICES];
2284 __le16 status_block_index;
2285 u8 func;
2286 u8 status_block_id;
2287 __le32 __flags;
2288};
2289
2290/*
2291 * host status block
2292 */
2293struct host_def_status_block {
2294 struct atten_def_status_block atten_status_block;
2295 struct cstorm_def_status_block_u u_def_status_block;
2296 struct cstorm_def_status_block_c c_def_status_block;
2297 struct xstorm_def_status_block x_def_status_block;
2298 struct tstorm_def_status_block t_def_status_block;
2299};
2300
2301
2302/*
2303 * cstorm status block, generated by ustorm
2304 */
2305struct cstorm_status_block_u {
2306 __le16 index_values[HC_USTORM_SB_NUM_INDICES];
2307 __le16 status_block_index;
2308 u8 func;
2309 u8 status_block_id;
2310 __le32 __flags;
2311};
2312
2313/*
2314 * cstorm status block, generated by cstorm
2315 */
2316struct cstorm_status_block_c {
2317 __le16 index_values[HC_CSTORM_SB_NUM_INDICES];
2318 __le16 status_block_index;
2319 u8 func;
2320 u8 status_block_id;
2321 __le32 __flags;
2322};
2323
2324/*
2325 * host status block
2326 */
2327struct host_status_block {
2328 struct cstorm_status_block_u u_status_block;
2329 struct cstorm_status_block_c c_status_block;
2330};
2331
2332
2333/*
2334 * The data for the client setup ramrod
2335 */
2336struct eth_client_setup_ramrod_data {
2337 u32 client_id;
2338 u8 is_rdma;
2339 u8 is_fcoe;
2340 u16 reserved1;
2341};
2342
2343
2344/*
2345 * regular eth FP CQE parameters struct
2346 */
2347struct eth_fast_path_rx_cqe {
2348 u8 type_error_flags;
2349#define ETH_FAST_PATH_RX_CQE_TYPE (0x1<<0)
2350#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0
2351#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<1)
2352#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 1
2353#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<2)
2354#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 2
2355#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<3)
2356#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 3
2357#define ETH_FAST_PATH_RX_CQE_START_FLG (0x1<<4)
2358#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4
2359#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5)
2360#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5
2361#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6)
2362#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
2363 u8 status_flags;
2364#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
2365#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
2366#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3)
2367#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3
2368#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4)
2369#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4
2370#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5)
2371#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5
2372#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6)
2373#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6
2374#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7)
2375#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7
2376 u8 placement_offset;
2377 u8 queue_index;
2378 __le32 rss_hash_result;
2379 __le16 vlan_tag;
2380 __le16 pkt_len;
2381 __le16 len_on_bd;
2382 struct parsing_flags pars_flags;
2383 __le16 sgl[8];
2384};
2385
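A receive path would normally check type_error_flags before trusting the hardware checksums; a minimal sketch (the helper name is illustrative):

static inline int rx_cqe_csum_ok(const struct eth_fast_path_rx_cqe *cqe)
{
	/* both the IP and the L4 bad-checksum flags must be clear */
	return !(cqe->type_error_flags &
		 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
		  ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG));
}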
2386
2387/*
2388 * The data for the halt ramrod
2389 */
2390struct eth_halt_ramrod_data {
2391 u32 client_id;
2392 u32 reserved0;
2393};
2394
2395
2396/*
2397 * The data for statistics query ramrod
2398 */
2399struct eth_query_ramrod_data {
2400#if defined(__BIG_ENDIAN)
2401 u8 reserved0;
2402 u8 collect_port;
2403 u16 drv_counter;
2404#elif defined(__LITTLE_ENDIAN)
2405 u16 drv_counter;
2406 u8 collect_port;
2407 u8 reserved0;
2408#endif
2409 u32 ctr_id_vector;
2410};
2411
2412
2413/*
2414 * Placeholder for ramrod protocol-specific data
2415 */
2416struct ramrod_data {
2417 __le32 data_lo;
2418 __le32 data_hi;
2419};
2420
2421/*
2422 * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
2423 */
2424union eth_ramrod_data {
2425 struct ramrod_data general;
2426};
2427
2428
2429/*
2430 * Eth Rx Cqe structure- general structure for ramrods
2431 */
2432struct common_ramrod_eth_rx_cqe {
2433 u8 ramrod_type;
2434#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x1<<0)
2435#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
2436#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<1)
2437#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 1
2438#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x3F<<2)
2439#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 2
2440 u8 conn_type;
2441 __le16 reserved1;
2442 __le32 conn_and_cmd_data;
2443#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0)
2444#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0
2445#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24)
2446#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24
2447 struct ramrod_data protocol_data;
2448 __le32 reserved2[4];
2449};
2450
2451/*
2452 * Rx Last CQE in page (in ETH)
2453 */
2454struct eth_rx_cqe_next_page {
2455 __le32 addr_lo;
2456 __le32 addr_hi;
2457 __le32 reserved[6];
2458};
2459
2460/*
2461 * union for all eth rx cqe types (fix their sizes)
2462 */
2463union eth_rx_cqe {
2464 struct eth_fast_path_rx_cqe fast_path_cqe;
2465 struct common_ramrod_eth_rx_cqe ramrod_cqe;
2466 struct eth_rx_cqe_next_page next_page_cqe;
2467};
2468
2469
2470/*
2471 * common data for all protocols
2472 */
2473struct spe_hdr {
2474 __le32 conn_and_cmd_data;
2475#define SPE_HDR_CID (0xFFFFFF<<0)
2476#define SPE_HDR_CID_SHIFT 0
2477#define SPE_HDR_CMD_ID (0xFF<<24)
2478#define SPE_HDR_CMD_ID_SHIFT 24
2479 __le16 type;
2480#define SPE_HDR_CONN_TYPE (0xFF<<0)
2481#define SPE_HDR_CONN_TYPE_SHIFT 0
2482#define SPE_HDR_COMMON_RAMROD (0xFF<<8)
2483#define SPE_HDR_COMMON_RAMROD_SHIFT 8
2484 __le16 reserved;
2485};
2486
2487/*
2488 * Ethernet slow path element
2489 */
2490union eth_specific_data {
2491 u8 protocol_data[8];
2492 struct regpair mac_config_addr;
2493 struct eth_client_setup_ramrod_data client_setup_ramrod_data;
2494 struct eth_halt_ramrod_data halt_ramrod_data;
2495 struct regpair leading_cqe_addr;
2496 struct regpair update_data_addr;
2497 struct eth_query_ramrod_data query_ramrod_data;
2498};
2499
2500/*
2501 * Ethernet slow path element
2502 */
2503struct eth_spe {
2504 struct spe_hdr hdr;
2505 union eth_specific_data data;
2506};
2507
2508
2509/*
2510 * array of 13 BDs as it appears in the eth Xstorm context
2511 */
2512struct eth_tx_bds_array {
2513 union eth_tx_bd_types bds[13];
2514};
2515
2516
2517/*
2518 * Common configuration parameters per function in Tstorm
2519 */
2520struct tstorm_eth_function_common_config {
2521#if defined(__BIG_ENDIAN)
2522 u8 leading_client_id;
2523 u8 rss_result_mask;
2524 u16 config_flags;
2525#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
2526#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
2527#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
2528#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
2529#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
2530#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
2531#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
2532#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
2533#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
2534#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
2535#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7)
2536#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7
2537#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8)
2538#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8
2539#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9)
2540#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9
2541#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
2542#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
2543#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
2544#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
2545#elif defined(__LITTLE_ENDIAN)
2546 u16 config_flags;
2547#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
2548#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
2549#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
2550#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
2551#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
2552#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
2553#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
2554#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
2555#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
2556#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
2557#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7)
2558#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7
2559#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8)
2560#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8
2561#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9)
2562#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9
2563#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
2564#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
2565#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
2566#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
2567 u8 rss_result_mask;
2568 u8 leading_client_id;
2569#endif
2570 u16 vlan_id[2];
2571};
2572
2573/*
2574 * RSS indirection table update configuration
2575 */
2576struct rss_update_config {
2577#if defined(__BIG_ENDIAN)
2578 u16 toe_rss_bitmap;
2579 u16 flags;
2580#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0)
2581#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0
2582#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1)
2583#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1
2584#define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2)
2585#define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2
2586#elif defined(__LITTLE_ENDIAN)
2587 u16 flags;
2588#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0)
2589#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0
2590#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1)
2591#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1
2592#define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2)
2593#define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2
2594 u16 toe_rss_bitmap;
2595#endif
2596 u32 reserved1;
2597};
2598
2599/*
2600 * parameters for eth update ramrod
2601 */
2602struct eth_update_ramrod_data {
2603 struct tstorm_eth_function_common_config func_config;
2604 u8 indirectionTable[128];
2605 struct rss_update_config rss_config;
2606};
2607
2608
2609/*
2610 * MAC filtering configuration command header
2611 */
2612struct mac_configuration_hdr {
2613 u8 length;
2614 u8 offset;
2615 u16 client_id;
2616 u32 reserved1;
2617};
2618
2619/*
2620 * MAC address in list for ramrod
2621 */
2622struct tstorm_cam_entry {
2623 __le16 lsb_mac_addr;
2624 __le16 middle_mac_addr;
2625 __le16 msb_mac_addr;
2626 __le16 flags;
2627#define TSTORM_CAM_ENTRY_PORT_ID (0x1<<0)
2628#define TSTORM_CAM_ENTRY_PORT_ID_SHIFT 0
2629#define TSTORM_CAM_ENTRY_RSRVVAL0 (0x7<<1)
2630#define TSTORM_CAM_ENTRY_RSRVVAL0_SHIFT 1
2631#define TSTORM_CAM_ENTRY_RESERVED0 (0xFFF<<4)
2632#define TSTORM_CAM_ENTRY_RESERVED0_SHIFT 4
2633};
2634
2635/*
2636 * MAC filtering: CAM target table entry
2637 */
2638struct tstorm_cam_target_table_entry {
2639 u8 flags;
2640#define TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST (0x1<<0)
2641#define TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST_SHIFT 0
2642#define TSTORM_CAM_TARGET_TABLE_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<1)
2643#define TSTORM_CAM_TARGET_TABLE_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 1
2644#define TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE (0x1<<2)
2645#define TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE_SHIFT 2
2646#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC (0x1<<3)
2647#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC_SHIFT 3
2648#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0 (0xF<<4)
2649#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0_SHIFT 4
2650 u8 reserved1;
2651 u16 vlan_id;
2652 u32 clients_bit_vector;
2653};
2654
2655/*
2656 * MAC address in list for ramrod
2657 */
2658struct mac_configuration_entry {
2659 struct tstorm_cam_entry cam_entry;
2660 struct tstorm_cam_target_table_entry target_table_entry;
2661};
2662
2663/*
2664 * MAC filtering configuration command
2665 */
2666struct mac_configuration_cmd {
2667 struct mac_configuration_hdr hdr;
2668 struct mac_configuration_entry config_table[64];
2669};
2670
2671
2672/*
2673 * MAC address in list for ramrod
2674 */
2675struct mac_configuration_entry_e1h {
2676 __le16 lsb_mac_addr;
2677 __le16 middle_mac_addr;
2678 __le16 msb_mac_addr;
2679 __le16 vlan_id;
2680 __le16 e1hov_id;
2681 u8 reserved0;
2682 u8 flags;
2683#define MAC_CONFIGURATION_ENTRY_E1H_PORT (0x1<<0)
2684#define MAC_CONFIGURATION_ENTRY_E1H_PORT_SHIFT 0
2685#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE (0x1<<1)
2686#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE_SHIFT 1
2687#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC (0x1<<2)
2688#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC_SHIFT 2
2689#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1 (0x1F<<3)
2690#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1_SHIFT 3
2691 u32 clients_bit_vector;
2692};
2693
2694/*
2695 * MAC filtering configuration command
2696 */
2697struct mac_configuration_cmd_e1h {
2698 struct mac_configuration_hdr hdr;
2699 struct mac_configuration_entry_e1h config_table[32];
2700};
2701
2702
2703/*
2704 * approximate-match multicast filtering for E1H per function in Tstorm
2705 */
2706struct tstorm_eth_approximate_match_multicast_filtering {
2707 u32 mcast_add_hash_bit_array[8];
2708};
2709
2710
2711/*
2712 * Configuration parameters per client in Tstorm
2713 */
2714struct tstorm_eth_client_config {
2715#if defined(__BIG_ENDIAN)
2716 u8 reserved0;
2717 u8 statistics_counter_id;
2718 u16 mtu;
2719#elif defined(__LITTLE_ENDIAN)
2720 u16 mtu;
2721 u8 statistics_counter_id;
2722 u8 reserved0;
2723#endif
2724#if defined(__BIG_ENDIAN)
2725 u16 drop_flags;
2726#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
2727#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
2728#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
2729#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
2730#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
2731#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
2732#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
2733#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
2734#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
2735#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
2736 u16 config_flags;
2737#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
2738#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
2739#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
2740#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
2741#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
2742#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
2743#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
2744#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
2745#elif defined(__LITTLE_ENDIAN)
2746 u16 config_flags;
2747#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
2748#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
2749#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
2750#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
2751#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
2752#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
2753#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
2754#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
2755 u16 drop_flags;
2756#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
2757#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
2758#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
2759#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
2760#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
2761#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
2762#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
2763#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
2764#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
2765#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
2766#endif
2767};
2768
2769
2770/*
2771 * MAC filtering configuration parameters per port in Tstorm
2772 */
2773struct tstorm_eth_mac_filter_config {
2774 u32 ucast_drop_all;
2775 u32 ucast_accept_all;
2776 u32 mcast_drop_all;
2777 u32 mcast_accept_all;
2778 u32 bcast_drop_all;
2779 u32 bcast_accept_all;
2780 u32 strict_vlan;
2781 u32 vlan_filter[2];
2782 u32 reserved;
2783};
2784
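Assuming each field is a per-client bit mask (as the u32 width suggests), a promiscuous-style configuration would set the client's bit in the accept-all words, roughly as below; how the client bit is derived is driver logic and not shown here:

static inline void mac_filter_set_promisc(struct tstorm_eth_mac_filter_config *cfg,
					  u32 client_bit)
{
	cfg->ucast_accept_all |= client_bit;
	cfg->mcast_accept_all |= client_bit;
	cfg->bcast_accept_all |= client_bit;
}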
2785
2786/*
2787 * common flag to indicate existence of TPA.
2788 */
2789struct tstorm_eth_tpa_exist {
2790#if defined(__BIG_ENDIAN)
2791 u16 reserved1;
2792 u8 reserved0;
2793 u8 tpa_exist;
2794#elif defined(__LITTLE_ENDIAN)
2795 u8 tpa_exist;
2796 u8 reserved0;
2797 u16 reserved1;
2798#endif
2799 u32 reserved2;
2800};
2801
2802
2803/*
2804 * rx rings pause data for E1h only
2805 */
2806struct ustorm_eth_rx_pause_data_e1h {
2807#if defined(__BIG_ENDIAN)
2808 u16 bd_thr_low;
2809 u16 cqe_thr_low;
2810#elif defined(__LITTLE_ENDIAN)
2811 u16 cqe_thr_low;
2812 u16 bd_thr_low;
2813#endif
2814#if defined(__BIG_ENDIAN)
2815 u16 cos;
2816 u16 sge_thr_low;
2817#elif defined(__LITTLE_ENDIAN)
2818 u16 sge_thr_low;
2819 u16 cos;
2820#endif
2821#if defined(__BIG_ENDIAN)
2822 u16 bd_thr_high;
2823 u16 cqe_thr_high;
2824#elif defined(__LITTLE_ENDIAN)
2825 u16 cqe_thr_high;
2826 u16 bd_thr_high;
2827#endif
2828#if defined(__BIG_ENDIAN)
2829 u16 reserved0;
2830 u16 sge_thr_high;
2831#elif defined(__LITTLE_ENDIAN)
2832 u16 sge_thr_high;
2833 u16 reserved0;
2834#endif
2835};
2836
2837
2838/*
2839 * Three RX producers for ETH
2840 */
2841struct ustorm_eth_rx_producers {
2842#if defined(__BIG_ENDIAN)
2843 u16 bd_prod;
2844 u16 cqe_prod;
2845#elif defined(__LITTLE_ENDIAN)
2846 u16 cqe_prod;
2847 u16 bd_prod;
2848#endif
2849#if defined(__BIG_ENDIAN)
2850 u16 reserved;
2851 u16 sge_prod;
2852#elif defined(__LITTLE_ENDIAN)
2853 u16 sge_prod;
2854 u16 reserved;
2855#endif
2856};
2857
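A sketch of publishing all three producers at once: fill the structure and copy it word by word into the Ustorm internal memory with REG_WR, the register-write accessor used elsewhere in this driver (the destination address and the helper name are illustrative):

static void update_rx_producers(struct bnx2x *bp, u32 dst_addr,
				u16 bd_prod, u16 cqe_prod, u16 sge_prod)
{
	struct ustorm_eth_rx_producers prods = {0};
	u32 *src = (u32 *)&prods;
	unsigned int i;

	prods.bd_prod = bd_prod;
	prods.cqe_prod = cqe_prod;
	prods.sge_prod = sge_prod;

	/* the structure is two 32-bit words; write both */
	for (i = 0; i < sizeof(prods) / 4; i++)
		REG_WR(bp, dst_addr + i * 4, src[i]);
}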
2858
2859/*
2860 * per-port SAFC demo variables
2861 */
2862struct cmng_flags_per_port {
2863 u8 con_number[NUM_OF_PROTOCOLS];
2864 u32 cmng_enables;
2865#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0)
2866#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0
2867#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1)
2868#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1
2869#define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL (0x1<<2)
2870#define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL_SHIFT 2
2871#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL (0x1<<3)
2872#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3
2873#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4)
2874#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4
2875#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x7FFFFFF<<5)
2876#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 5
2877};
2878
2879
2880/*
2881 * per-port rate shaping variables
2882 */
2883struct rate_shaping_vars_per_port {
2884 u32 rs_periodic_timeout;
2885 u32 rs_threshold;
2886};
2887
2888/*
2889 * per-port fairness variables
2890 */
2891struct fairness_vars_per_port {
2892 u32 upper_bound;
2893 u32 fair_threshold;
2894 u32 fairness_timeout;
2895};
2896
2897/*
2898 * per-port SAFC variables
2899 */
2900struct safc_struct_per_port {
2901#if defined(__BIG_ENDIAN)
2902 u16 __reserved1;
2903 u8 __reserved0;
2904 u8 safc_timeout_usec;
2905#elif defined(__LITTLE_ENDIAN)
2906 u8 safc_timeout_usec;
2907 u8 __reserved0;
2908 u16 __reserved1;
2909#endif
2910 u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
2911};
2912
2913/*
2914 * Per-port congestion management variables
2915 */
2916struct cmng_struct_per_port {
2917 struct rate_shaping_vars_per_port rs_vars;
2918 struct fairness_vars_per_port fair_vars;
2919 struct safc_struct_per_port safc_vars;
2920 struct cmng_flags_per_port flags;
2921};
2922
2923
2924/*
2925 * Dynamic host coalescing init parameters
2926 */
2927struct dynamic_hc_config {
2928 u32 threshold[3];
2929 u8 shift_per_protocol[HC_USTORM_SB_NUM_INDICES];
2930 u8 hc_timeout0[HC_USTORM_SB_NUM_INDICES];
2931 u8 hc_timeout1[HC_USTORM_SB_NUM_INDICES];
2932 u8 hc_timeout2[HC_USTORM_SB_NUM_INDICES];
2933 u8 hc_timeout3[HC_USTORM_SB_NUM_INDICES];
2934};
2935
2936
2937/*
2938 * Protocol-common statistics collected by the Xstorm (per client)
2939 */
2940struct xstorm_per_client_stats {
2941 __le32 reserved0;
2942 __le32 unicast_pkts_sent;
2943 struct regpair unicast_bytes_sent;
2944 struct regpair multicast_bytes_sent;
2945 __le32 multicast_pkts_sent;
2946 __le32 broadcast_pkts_sent;
2947 struct regpair broadcast_bytes_sent;
2948 __le16 stats_counter;
2949 __le16 reserved1;
2950 __le32 reserved2;
2951};
2952
2953/*
2954 * Common statistics collected by the Xstorm (per port)
2955 */
2956struct xstorm_common_stats {
2957 struct xstorm_per_client_stats client_statistics[MAX_X_STAT_COUNTER_ID];
2958};
2959
2960/*
2961 * Protocol-common statistics collected by the Tstorm (per port)
2962 */
2963struct tstorm_per_port_stats {
2964 __le32 mac_filter_discard;
2965 __le32 xxoverflow_discard;
2966 __le32 brb_truncate_discard;
2967 __le32 mac_discard;
2968};
2969
2970/*
2971 * Protocol-common statistics collected by the Tstorm (per client)
2972 */
2973struct tstorm_per_client_stats {
2974 struct regpair rcv_unicast_bytes;
2975 struct regpair rcv_broadcast_bytes;
2976 struct regpair rcv_multicast_bytes;
2977 struct regpair rcv_error_bytes;
2978 __le32 checksum_discard;
2979 __le32 packets_too_big_discard;
2980 __le32 rcv_unicast_pkts;
2981 __le32 rcv_broadcast_pkts;
2982 __le32 rcv_multicast_pkts;
2983 __le32 no_buff_discard;
2984 __le32 ttl0_discard;
2985 __le16 stats_counter;
2986 __le16 reserved0;
2987};
2988
2989/*
2990 * Protocol-common statistics collected by the Tstorm
2991 */
2992struct tstorm_common_stats {
2993 struct tstorm_per_port_stats port_statistics;
2994 struct tstorm_per_client_stats client_statistics[MAX_T_STAT_COUNTER_ID];
2995};
2996
2997/*
2998 * Protocol-common statistics collected by the Ustorm (per client)
2999 */
3000struct ustorm_per_client_stats {
3001 struct regpair ucast_no_buff_bytes;
3002 struct regpair mcast_no_buff_bytes;
3003 struct regpair bcast_no_buff_bytes;
3004 __le32 ucast_no_buff_pkts;
3005 __le32 mcast_no_buff_pkts;
3006 __le32 bcast_no_buff_pkts;
3007 __le16 stats_counter;
3008 __le16 reserved0;
3009};
3010
3011/*
3012 * Protocol-common statistics collected by the Ustorm
3013 */
3014struct ustorm_common_stats {
3015 struct ustorm_per_client_stats client_statistics[MAX_U_STAT_COUNTER_ID];
3016};
3017
3018/*
3019 * Eth statistics query structure for the eth_stats_query ramrod
3020 */
3021struct eth_stats_query {
3022 struct xstorm_common_stats xstorm_common;
3023 struct tstorm_common_stats tstorm_common;
3024 struct ustorm_common_stats ustorm_common;
3025};
3026
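All 64-bit counters above are carried as little-endian lo/hi pairs (struct regpair); a host-side reader would combine them as below (helper name illustrative; the driver's stats code uses its own accumulation macros):

static inline u64 regpair_to_u64(const struct regpair *rp)
{
	return ((u64)le32_to_cpu(rp->hi) << 32) | le32_to_cpu(rp->lo);
}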
3027
3028/*
3029 * per-vnic fairness variables
3030 */
3031struct fairness_vars_per_vn {
3032 u32 cos_credit_delta[MAX_COS_NUMBER];
3033 u32 protocol_credit_delta[NUM_OF_PROTOCOLS];
3034 u32 vn_credit_delta;
3035 u32 __reserved0;
3036};
3037
3038
3039/*
3040 * FW version stored in the Xstorm RAM
3041 */
3042struct fw_version {
3043#if defined(__BIG_ENDIAN)
3044 u8 engineering;
3045 u8 revision;
3046 u8 minor;
3047 u8 major;
3048#elif defined(__LITTLE_ENDIAN)
3049 u8 major;
3050 u8 minor;
3051 u8 revision;
3052 u8 engineering;
3053#endif
3054 u32 flags;
3055#define FW_VERSION_OPTIMIZED (0x1<<0)
3056#define FW_VERSION_OPTIMIZED_SHIFT 0
3057#define FW_VERSION_BIG_ENDIEN (0x1<<1)
3058#define FW_VERSION_BIG_ENDIEN_SHIFT 1
3059#define FW_VERSION_CHIP_VERSION (0x3<<2)
3060#define FW_VERSION_CHIP_VERSION_SHIFT 2
3061#define __FW_VERSION_RESERVED (0xFFFFFFF<<4)
3062#define __FW_VERSION_RESERVED_SHIFT 4
3063};
3064
3065
3066/*
3067 * FW version stored in first line of pram
3068 */
3069struct pram_fw_version {
3070 u8 major;
3071 u8 minor;
3072 u8 revision;
3073 u8 engineering;
3074 u8 flags;
3075#define PRAM_FW_VERSION_OPTIMIZED (0x1<<0)
3076#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0
3077#define PRAM_FW_VERSION_STORM_ID (0x3<<1)
3078#define PRAM_FW_VERSION_STORM_ID_SHIFT 1
3079#define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3)
3080#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3
3081#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4)
3082#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4
3083#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6)
3084#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6
3085};
3086
3087
3088/*
3089 * The send queue element
3090 */
3091struct protocol_common_spe {
3092 struct spe_hdr hdr;
3093 struct regpair phy_address;
3094};
3095
3096
3097/*
3098 * a single rate shaping counter; can be used as a protocol or vnic counter
3099 */
3100struct rate_shaping_counter {
3101 u32 quota;
3102#if defined(__BIG_ENDIAN)
3103 u16 __reserved0;
3104 u16 rate;
3105#elif defined(__LITTLE_ENDIAN)
3106 u16 rate;
3107 u16 __reserved0;
3108#endif
3109};
3110
3111
3112/*
3113 * per-vnic rate shaping variables
3114 */
3115struct rate_shaping_vars_per_vn {
3116 struct rate_shaping_counter protocol_counters[NUM_OF_PROTOCOLS];
3117 struct rate_shaping_counter vn_counter;
3118};
3119
3120
3121/*
3122 * The send queue element
3123 */
3124struct slow_path_element {
3125 struct spe_hdr hdr;
3126 u8 protocol_data[8];
3127};
3128
3129
3130/*
3131 * eth/toe flags that indicate whether to query
3132 */
3133struct stats_indication_flags {
3134 u32 collect_eth;
3135 u32 collect_toe;
3136};
3137
3138
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
new file mode 100644
index 00000000000..65b26cbfe3e
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -0,0 +1,152 @@
1/* bnx2x_init.h: Broadcom Everest network driver.
2 * Structures and macros needed during the initialization.
3 *
4 * Copyright (c) 2007-2009 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
11 * Written by: Eliezer Tamir
12 * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
13 */
14
15#ifndef BNX2X_INIT_H
16#define BNX2X_INIT_H
17
18/* RAM0 size in bytes */
19#define STORM_INTMEM_SIZE_E1 0x5800
20#define STORM_INTMEM_SIZE_E1H 0x10000
21#define STORM_INTMEM_SIZE(bp) ((CHIP_IS_E1(bp) ? STORM_INTMEM_SIZE_E1 : \
22 STORM_INTMEM_SIZE_E1H) / 4)
23
24
25/* Init operation types and structures */
26/* Common for both E1 and E1H */
27#define OP_RD 0x1 /* read single register */
28#define OP_WR 0x2 /* write single register */
29#define OP_IW 0x3 /* write single register using mailbox */
30#define OP_SW 0x4 /* copy a string to the device */
31#define OP_SI 0x5 /* copy a string using mailbox */
32#define OP_ZR 0x6 /* clear memory */
33#define OP_ZP 0x7 /* unzip then copy with DMAE */
34#define OP_WR_64 0x8 /* write 64 bit pattern */
35#define OP_WB 0x9 /* copy a string using DMAE */
36
37/* FPGA and EMUL specific operations */
38#define OP_WR_EMUL 0xa /* write single register on Emulation */
39#define OP_WR_FPGA 0xb /* write single register on FPGA */
40#define OP_WR_ASIC 0xc /* write single register on ASIC */
41
42/* Init stages */
43/* Never reorder stages !!! */
44#define COMMON_STAGE 0
45#define PORT0_STAGE 1
46#define PORT1_STAGE 2
47#define FUNC0_STAGE 3
48#define FUNC1_STAGE 4
49#define FUNC2_STAGE 5
50#define FUNC3_STAGE 6
51#define FUNC4_STAGE 7
52#define FUNC5_STAGE 8
53#define FUNC6_STAGE 9
54#define FUNC7_STAGE 10
55#define STAGE_IDX_MAX 11
56
57#define STAGE_START 0
58#define STAGE_END 1
59
60
61/* Indices of blocks */
62#define PRS_BLOCK 0
63#define SRCH_BLOCK 1
64#define TSDM_BLOCK 2
65#define TCM_BLOCK 3
66#define BRB1_BLOCK 4
67#define TSEM_BLOCK 5
68#define PXPCS_BLOCK 6
69#define EMAC0_BLOCK 7
70#define EMAC1_BLOCK 8
71#define DBU_BLOCK 9
72#define MISC_BLOCK 10
73#define DBG_BLOCK 11
74#define NIG_BLOCK 12
75#define MCP_BLOCK 13
76#define UPB_BLOCK 14
77#define CSDM_BLOCK 15
78#define USDM_BLOCK 16
79#define CCM_BLOCK 17
80#define UCM_BLOCK 18
81#define USEM_BLOCK 19
82#define CSEM_BLOCK 20
83#define XPB_BLOCK 21
84#define DQ_BLOCK 22
85#define TIMERS_BLOCK 23
86#define XSDM_BLOCK 24
87#define QM_BLOCK 25
88#define PBF_BLOCK 26
89#define XCM_BLOCK 27
90#define XSEM_BLOCK 28
91#define CDU_BLOCK 29
92#define DMAE_BLOCK 30
93#define PXP_BLOCK 31
94#define CFC_BLOCK 32
95#define HC_BLOCK 33
96#define PXP2_BLOCK 34
97#define MISC_AEU_BLOCK 35
98#define PGLUE_B_BLOCK 36
99#define IGU_BLOCK 37
100
101
102/* Returns the index of start or end of a specific block stage in ops array*/
103#define BLOCK_OPS_IDX(block, stage, end) \
104 (2*(((block)*STAGE_IDX_MAX) + (stage)) + (end))
105
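A quick illustration of the indexing above, as consumed by bnx2x_init_block() in bnx2x_init_ops.h: the ops-offsets array keeps two entries (start, end) per (block, stage) pair. This user-space sketch only replays the arithmetic; the block/stage values come from the defines above and the printed indices say nothing about the firmware's actual offset contents.

#include <stdio.h>

#define STAGE_IDX_MAX 11
#define STAGE_START 0
#define STAGE_END 1
#define MISC_BLOCK 10
#define COMMON_STAGE 0

/* same arithmetic as BLOCK_OPS_IDX() above: two entries per (block, stage) */
#define BLOCK_OPS_IDX(block, stage, end) \
	(2*(((block)*STAGE_IDX_MAX) + (stage)) + (end))

int main(void)
{
	printf("MISC/COMMON ops slice indices: start %d, end %d\n",
	       BLOCK_OPS_IDX(MISC_BLOCK, COMMON_STAGE, STAGE_START),
	       BLOCK_OPS_IDX(MISC_BLOCK, COMMON_STAGE, STAGE_END));
	return 0;
}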
106
107struct raw_op {
108 u32 op:8;
109 u32 offset:24;
110 u32 raw_data;
111};
112
113struct op_read {
114 u32 op:8;
115 u32 offset:24;
116 u32 pad;
117};
118
119struct op_write {
120 u32 op:8;
121 u32 offset:24;
122 u32 val;
123};
124
125struct op_string_write {
126 u32 op:8;
127 u32 offset:24;
128#ifdef __LITTLE_ENDIAN
129 u16 data_off;
130 u16 data_len;
131#else /* __BIG_ENDIAN */
132 u16 data_len;
133 u16 data_off;
134#endif
135};
136
137struct op_zero {
138 u32 op:8;
139 u32 offset:24;
140 u32 len;
141};
142
143union init_op {
144 struct op_read read;
145 struct op_write write;
146 struct op_string_write str_wr;
147 struct op_zero zero;
148 struct raw_op raw;
149};
150
151#endif /* BNX2X_INIT_H */
152
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
new file mode 100644
index 00000000000..2b1363a6fe7
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -0,0 +1,506 @@
1/* bnx2x_init_ops.h: Broadcom Everest network driver.
2 * Static functions needed during the initialization.
3 * This file is "included" in bnx2x_main.c.
4 *
5 * Copyright (c) 2007-2010 Broadcom Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
12 * Written by: Vladislav Zolotarov <vladz@broadcom.com>
13 */
14
15#ifndef BNX2X_INIT_OPS_H
16#define BNX2X_INIT_OPS_H
17
18static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
19
20
21static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
22 u32 len)
23{
24 u32 i;
25
26 for (i = 0; i < len; i++)
27 REG_WR(bp, addr + i*4, data[i]);
28}
29
30static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
31 u32 len)
32{
33 u32 i;
34
35 for (i = 0; i < len; i++)
36 REG_WR_IND(bp, addr + i*4, data[i]);
37}
38
39static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
40{
41 if (bp->dmae_ready)
42 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
43 else
44 bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
45}
46
47static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
48{
49 u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
50 u32 buf_len32 = buf_len/4;
51 u32 i;
52
53 memset(GUNZIP_BUF(bp), (u8)fill, buf_len);
54
55 for (i = 0; i < len; i += buf_len32) {
56 u32 cur_len = min(buf_len32, len - i);
57
58 bnx2x_write_big_buf(bp, addr + i*4, cur_len);
59 }
60}
61
62static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
63 u32 len64)
64{
65 u32 buf_len32 = FW_BUF_SIZE/4;
66 u32 len = len64*2;
67 u64 data64 = 0;
68 u32 i;
69
70 /* 64 bit value is in a blob: first low DWORD, then high DWORD */
71 data64 = HILO_U64((*(data + 1)), (*data));
72
73 len64 = min((u32)(FW_BUF_SIZE/8), len64);
74 for (i = 0; i < len64; i++) {
75 u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i;
76
77 *pdata = data64;
78 }
79
80 for (i = 0; i < len; i += buf_len32) {
81 u32 cur_len = min(buf_len32, len - i);
82
83 bnx2x_write_big_buf(bp, addr + i*4, cur_len);
84 }
85}
86
87/*********************************************************
88 There are different blobs for each PRAM section.
89 In addition, each blob write operation is divided into a few operations
 90 in order to decrease the amount of physically contiguous buffer needed.
 91 Thus, when we select a blob, the address may carry some offset
 92 from the beginning of the PRAM section.
93 The same holds for the INT_TABLE sections.
94**********************************************************/
95#define IF_IS_INT_TABLE_ADDR(base, addr) \
96 if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
97
98#define IF_IS_PRAM_ADDR(base, addr) \
99 if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
100
101static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data)
102{
103 IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
104 data = INIT_TSEM_INT_TABLE_DATA(bp);
105 else
106 IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
107 data = INIT_CSEM_INT_TABLE_DATA(bp);
108 else
109 IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
110 data = INIT_USEM_INT_TABLE_DATA(bp);
111 else
112 IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
113 data = INIT_XSEM_INT_TABLE_DATA(bp);
114 else
115 IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
116 data = INIT_TSEM_PRAM_DATA(bp);
117 else
118 IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
119 data = INIT_CSEM_PRAM_DATA(bp);
120 else
121 IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
122 data = INIT_USEM_PRAM_DATA(bp);
123 else
124 IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
125 data = INIT_XSEM_PRAM_DATA(bp);
126
127 return data;
128}
129
130static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
131{
132 if (bp->dmae_ready)
133 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
134 else
135 bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
136}
137
138static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
139 u32 len)
140{
141 const u32 *old_data = data;
142
143 data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data);
144
145 if (bp->dmae_ready) {
146 if (old_data != data)
147 VIRT_WR_DMAE_LEN(bp, data, addr, len, 1);
148 else
149 VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
150 } else
151 bnx2x_init_ind_wr(bp, addr, data, len);
152}
153
154static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
155{
156 const u8 *data = NULL;
157 int rc;
158 u32 i;
159
160 data = bnx2x_sel_blob(bp, addr, data) + blob_off*4;
161
162 rc = bnx2x_gunzip(bp, data, len);
163 if (rc)
164 return;
165
166 /* gunzip_outlen is in dwords */
167 len = GUNZIP_OUTLEN(bp);
168 for (i = 0; i < len; i++)
169 ((u32 *)GUNZIP_BUF(bp))[i] =
170 cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
171
172 bnx2x_write_big_buf_wb(bp, addr, len);
173}
174
175static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
176{
177 u16 op_start =
178 INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_START)];
179 u16 op_end =
180 INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)];
181 union init_op *op;
182 int hw_wr;
183 u32 i, op_type, addr, len;
184 const u32 *data, *data_base;
185
186 /* If empty block */
187 if (op_start == op_end)
188 return;
189
190 if (CHIP_REV_IS_FPGA(bp))
191 hw_wr = OP_WR_FPGA;
192 else if (CHIP_REV_IS_EMUL(bp))
193 hw_wr = OP_WR_EMUL;
194 else
195 hw_wr = OP_WR_ASIC;
196
197 data_base = INIT_DATA(bp);
198
199 for (i = op_start; i < op_end; i++) {
200
201 op = (union init_op *)&(INIT_OPS(bp)[i]);
202
203 op_type = op->str_wr.op;
204 addr = op->str_wr.offset;
205 len = op->str_wr.data_len;
206 data = data_base + op->str_wr.data_off;
207
208 /* HW/EMUL specific */
209 if ((op_type > OP_WB) && (op_type == hw_wr))
210 op_type = OP_WR;
211
212 switch (op_type) {
213 case OP_RD:
214 REG_RD(bp, addr);
215 break;
216 case OP_WR:
217 REG_WR(bp, addr, op->write.val);
218 break;
219 case OP_SW:
220 bnx2x_init_str_wr(bp, addr, data, len);
221 break;
222 case OP_WB:
223 bnx2x_init_wr_wb(bp, addr, data, len);
224 break;
225 case OP_SI:
226 bnx2x_init_ind_wr(bp, addr, data, len);
227 break;
228 case OP_ZR:
229 bnx2x_init_fill(bp, addr, 0, op->zero.len);
230 break;
231 case OP_ZP:
232 bnx2x_init_wr_zp(bp, addr, len,
233 op->str_wr.data_off);
234 break;
235 case OP_WR_64:
236 bnx2x_init_wr_64(bp, addr, data, len);
237 break;
238 default:
 239 /* happens whenever an op is for a different HW (FPGA/EMUL/ASIC) */
240 break;
241 }
242 }
243}
244
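To make the table-driven dispatch above easier to follow, here is a stand-alone sketch that walks a small, entirely hypothetical ops slice and prints what each opcode would trigger; printf stands in for the register accessors and the struct is simplified relative to union init_op.

#include <stdio.h>

#define OP_RD 0x1
#define OP_WR 0x2
#define OP_ZR 0x6

struct fake_op {
	unsigned int op;
	unsigned int offset;
	unsigned int data;	/* value for OP_WR, dword count for OP_ZR */
};

/* entirely hypothetical ops slice for one (block, stage) pair */
static const struct fake_op ops[] = {
	{ OP_WR, 0x10000, 0x1 },
	{ OP_ZR, 0x10400, 64  },
	{ OP_RD, 0x10000, 0   },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
		switch (ops[i].op) {
		case OP_WR:	/* would be REG_WR(bp, offset, val) */
			printf("write 0x%x to 0x%x\n", ops[i].data, ops[i].offset);
			break;
		case OP_ZR:	/* would be bnx2x_init_fill(bp, offset, 0, len) */
			printf("zero %u dwords at 0x%x\n", ops[i].data, ops[i].offset);
			break;
		case OP_RD:	/* would be REG_RD(bp, offset) */
			printf("read 0x%x\n", ops[i].offset);
			break;
		}
	}
	return 0;
}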
245
246/****************************************************************************
247* PXP Arbiter
248****************************************************************************/
249/*
250 * This code configures the PCI read/write arbiter
251 * which implements a weighted round robin
252 * between the virtual queues in the chip.
253 *
254 * The values were derived for each PCI max payload and max request size.
 255 * Since max payload and max request size are only known at run time,
256 * this is done as a separate init stage.
257 */
258
259#define NUM_WR_Q 13
260#define NUM_RD_Q 29
261#define MAX_RD_ORD 3
262#define MAX_WR_ORD 2
263
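For orientation only: PCIe encodes the max payload and max read request sizes as 128 << n bytes, and the r_order/w_order arguments of bnx2x_init_pxp_arb() below are presumably those encoded values as read from the device's PCIe control registers by the caller (which is outside this hunk). Under that reading, MAX_RD_ORD 3 corresponds to 1024-byte read requests and MAX_WR_ORD 2 to 512-byte payloads; a trivial sketch of the mapping:

#include <stdio.h>

int main(void)
{
	int order;

	/* PCIe encodes max payload / max read request size as 128 << n */
	for (order = 0; order <= 3; order++)
		printf("order %d -> %d bytes\n", order, 128 << order);
	return 0;
}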
264/* configuration for one arbiter queue */
265struct arb_line {
266 int l;
267 int add;
268 int ubound;
269};
270
271/* derived configuration for each read queue for each max request size */
272static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
273/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
274 { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
275 { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
276 { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
277 { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
278 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
279 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
280 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
281 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
282/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
283 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
284 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
285 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
286 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
287 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
288 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
289 { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
290 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
291 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
292/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
293 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
294 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
295 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
296 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
297 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
298 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
299 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
300 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
301 { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
302};
303
304/* derived configuration for each write queue for each max request size */
305static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
306/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
307 { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
308 { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
309 { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
310 { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
311 { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
312 { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
313 { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
314 { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
315/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
316 { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
317 { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
318 { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
319};
320
321/* register addresses for read queues */
322static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
323/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
324 PXP2_REG_RQ_BW_RD_UBOUND0},
325 {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
326 PXP2_REG_PSWRQ_BW_UB1},
327 {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
328 PXP2_REG_PSWRQ_BW_UB2},
329 {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
330 PXP2_REG_PSWRQ_BW_UB3},
331 {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
332 PXP2_REG_RQ_BW_RD_UBOUND4},
333 {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
334 PXP2_REG_RQ_BW_RD_UBOUND5},
335 {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
336 PXP2_REG_PSWRQ_BW_UB6},
337 {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
338 PXP2_REG_PSWRQ_BW_UB7},
339 {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
340 PXP2_REG_PSWRQ_BW_UB8},
341/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
342 PXP2_REG_PSWRQ_BW_UB9},
343 {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
344 PXP2_REG_PSWRQ_BW_UB10},
345 {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
346 PXP2_REG_PSWRQ_BW_UB11},
347 {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
348 PXP2_REG_RQ_BW_RD_UBOUND12},
349 {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
350 PXP2_REG_RQ_BW_RD_UBOUND13},
351 {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
352 PXP2_REG_RQ_BW_RD_UBOUND14},
353 {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
354 PXP2_REG_RQ_BW_RD_UBOUND15},
355 {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
356 PXP2_REG_RQ_BW_RD_UBOUND16},
357 {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
358 PXP2_REG_RQ_BW_RD_UBOUND17},
359 {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
360 PXP2_REG_RQ_BW_RD_UBOUND18},
361/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
362 PXP2_REG_RQ_BW_RD_UBOUND19},
363 {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
364 PXP2_REG_RQ_BW_RD_UBOUND20},
365 {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
366 PXP2_REG_RQ_BW_RD_UBOUND22},
367 {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
368 PXP2_REG_RQ_BW_RD_UBOUND23},
369 {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
370 PXP2_REG_RQ_BW_RD_UBOUND24},
371 {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
372 PXP2_REG_RQ_BW_RD_UBOUND25},
373 {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
374 PXP2_REG_RQ_BW_RD_UBOUND26},
375 {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
376 PXP2_REG_RQ_BW_RD_UBOUND27},
377 {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
378 PXP2_REG_PSWRQ_BW_UB28}
379};
380
381/* register addresses for write queues */
382static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
383/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
384 PXP2_REG_PSWRQ_BW_UB1},
385 {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
386 PXP2_REG_PSWRQ_BW_UB2},
387 {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
388 PXP2_REG_PSWRQ_BW_UB3},
389 {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
390 PXP2_REG_PSWRQ_BW_UB6},
391 {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
392 PXP2_REG_PSWRQ_BW_UB7},
393 {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
394 PXP2_REG_PSWRQ_BW_UB8},
395 {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
396 PXP2_REG_PSWRQ_BW_UB9},
397 {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
398 PXP2_REG_PSWRQ_BW_UB10},
399 {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
400 PXP2_REG_PSWRQ_BW_UB11},
401/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
402 PXP2_REG_PSWRQ_BW_UB28},
403 {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
404 PXP2_REG_RQ_BW_WR_UBOUND29},
405 {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
406 PXP2_REG_RQ_BW_WR_UBOUND30}
407};
408
409static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
410{
411 u32 val, i;
412
413 if (r_order > MAX_RD_ORD) {
414 DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
415 r_order, MAX_RD_ORD);
416 r_order = MAX_RD_ORD;
417 }
418 if (w_order > MAX_WR_ORD) {
419 DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
420 w_order, MAX_WR_ORD);
421 w_order = MAX_WR_ORD;
422 }
423 if (CHIP_REV_IS_FPGA(bp)) {
424 DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
425 w_order = 0;
426 }
427 DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);
428
429 for (i = 0; i < NUM_RD_Q-1; i++) {
430 REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
431 REG_WR(bp, read_arb_addr[i].add,
432 read_arb_data[i][r_order].add);
433 REG_WR(bp, read_arb_addr[i].ubound,
434 read_arb_data[i][r_order].ubound);
435 }
436
437 for (i = 0; i < NUM_WR_Q-1; i++) {
438 if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
439 (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
440
441 REG_WR(bp, write_arb_addr[i].l,
442 write_arb_data[i][w_order].l);
443
444 REG_WR(bp, write_arb_addr[i].add,
445 write_arb_data[i][w_order].add);
446
447 REG_WR(bp, write_arb_addr[i].ubound,
448 write_arb_data[i][w_order].ubound);
449 } else {
450
451 val = REG_RD(bp, write_arb_addr[i].l);
452 REG_WR(bp, write_arb_addr[i].l,
453 val | (write_arb_data[i][w_order].l << 10));
454
455 val = REG_RD(bp, write_arb_addr[i].add);
456 REG_WR(bp, write_arb_addr[i].add,
457 val | (write_arb_data[i][w_order].add << 10));
458
459 val = REG_RD(bp, write_arb_addr[i].ubound);
460 REG_WR(bp, write_arb_addr[i].ubound,
461 val | (write_arb_data[i][w_order].ubound << 7));
462 }
463 }
464
465 val = write_arb_data[NUM_WR_Q-1][w_order].add;
466 val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
467 val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
468 REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
469
470 val = read_arb_data[NUM_RD_Q-1][r_order].add;
471 val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
472 val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
473 REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
474
475 REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
476 REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
477 REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
478 REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
479
480 if (r_order == MAX_RD_ORD)
481 REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
482
483 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
484
485 if (CHIP_IS_E1H(bp)) {
486 /* MPS w_order optimal TH presently TH
487 * 128 0 0 2
488 * 256 1 1 3
489 * >=512 2 2 3
490 */
491 val = ((w_order == 0) ? 2 : 3);
492 REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
493 REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
494 REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
495 REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
496 REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
497 REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
498 REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
499 REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
500 REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
501 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
502 REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
503 }
504}
505
506#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
new file mode 100644
index 00000000000..0383e306631
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -0,0 +1,6735 @@
1/* Copyright 2008-2009 Broadcom Corporation
2 *
3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you
5 * under the terms of the GNU General Public License version 2, available
6 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
7 *
8 * Notwithstanding the above, under no circumstances may you combine this
9 * software in any way with any other Broadcom software provided under a
10 * license other than the GPL, without Broadcom's express prior written
11 * consent.
12 *
13 * Written by Yaniv Rosner
14 *
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/pci.h>
22#include <linux/netdevice.h>
23#include <linux/delay.h>
24#include <linux/ethtool.h>
25#include <linux/mutex.h>
26
27#include "bnx2x.h"
28
29/********************************************************/
30#define ETH_HLEN 14
31#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/
32#define ETH_MIN_PACKET_SIZE 60
33#define ETH_MAX_PACKET_SIZE 1500
34#define ETH_MAX_JUMBO_PACKET_SIZE 9600
35#define MDIO_ACCESS_TIMEOUT 1000
36#define BMAC_CONTROL_RX_ENABLE 2
37
38/***********************************************************/
39/* Shortcut definitions */
40/***********************************************************/
41
42#define NIG_LATCH_BC_ENABLE_MI_INT 0
43
44#define NIG_STATUS_EMAC0_MI_INT \
45 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT
46#define NIG_STATUS_XGXS0_LINK10G \
47 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
48#define NIG_STATUS_XGXS0_LINK_STATUS \
49 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
50#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
51 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
52#define NIG_STATUS_SERDES0_LINK_STATUS \
53 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
54#define NIG_MASK_MI_INT \
55 NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
56#define NIG_MASK_XGXS0_LINK10G \
57 NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
58#define NIG_MASK_XGXS0_LINK_STATUS \
59 NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
60#define NIG_MASK_SERDES0_LINK_STATUS \
61 NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
62
63#define MDIO_AN_CL73_OR_37_COMPLETE \
64 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
65 MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
66
67#define XGXS_RESET_BITS \
68 (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \
69 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \
70 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \
71 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
72 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
73
74#define SERDES_RESET_BITS \
75 (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
76 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \
77 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \
78 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
79
80#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
81#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
82#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
83#define AUTONEG_PARALLEL \
84 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
85#define AUTONEG_SGMII_FIBER_AUTODET \
86 SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
87#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
88
89#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
90 MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
91#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
92 MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
93#define GP_STATUS_SPEED_MASK \
94 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
95#define GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
96#define GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
97#define GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
98#define GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
99#define GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
100#define GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
101#define GP_STATUS_10G_HIG \
102 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
103#define GP_STATUS_10G_CX4 \
104 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
105#define GP_STATUS_12G_HIG \
106 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG
107#define GP_STATUS_12_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G
108#define GP_STATUS_13G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G
109#define GP_STATUS_15G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G
110#define GP_STATUS_16G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G
111#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
112#define GP_STATUS_10G_KX4 \
113 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
114
115#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
116#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
117#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
118#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
119#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
120#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
121#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
122#define LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
123#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
124#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
125#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
126#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
127#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
128#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
129#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
130#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
131#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
132#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
133#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
134#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
135#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
136#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
137#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
138
139#define PHY_XGXS_FLAG 0x1
140#define PHY_SGMII_FLAG 0x2
141#define PHY_SERDES_FLAG 0x4
142
143/* */
144#define SFP_EEPROM_CON_TYPE_ADDR 0x2
145 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
146 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
147
148
149#define SFP_EEPROM_COMP_CODE_ADDR 0x3
150 #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4)
151 #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5)
152 #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6)
153
154#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
155 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
156 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
157
158#define SFP_EEPROM_OPTIONS_ADDR 0x40
159 #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
160#define SFP_EEPROM_OPTIONS_SIZE 2
161
162#define EDC_MODE_LINEAR 0x0022
163#define EDC_MODE_LIMITING 0x0044
164#define EDC_MODE_PASSIVE_DAC 0x0055
165
166
167
168/**********************************************************/
169/* INTERFACE */
170/**********************************************************/
171#define CL45_WR_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \
172 bnx2x_cl45_write(_bp, _port, 0, _phy_addr, \
173 DEFAULT_PHY_DEV_ADDR, \
174 (_bank + (_addr & 0xf)), \
175 _val)
176
177#define CL45_RD_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \
178 bnx2x_cl45_read(_bp, _port, 0, _phy_addr, \
179 DEFAULT_PHY_DEV_ADDR, \
180 (_bank + (_addr & 0xf)), \
181 _val)
182
183static void bnx2x_set_serdes_access(struct link_params *params)
184{
185 struct bnx2x *bp = params->bp;
186 u32 emac_base = (params->port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
187
188 /* Set Clause 22 */
189 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 1);
190 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
191 udelay(500);
192 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
193 udelay(500);
194 /* Set Clause 45 */
195 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 0);
196}
197static void bnx2x_set_phy_mdio(struct link_params *params, u8 phy_flags)
198{
199 struct bnx2x *bp = params->bp;
200
201 if (phy_flags & PHY_XGXS_FLAG) {
202 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
203 params->port*0x18, 0);
204 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
205 DEFAULT_PHY_DEV_ADDR);
206 } else {
207 bnx2x_set_serdes_access(params);
208
209 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
210 params->port*0x10,
211 DEFAULT_PHY_DEV_ADDR);
212 }
213}
214
215static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
216{
217 u32 val = REG_RD(bp, reg);
218
219 val |= bits;
220 REG_WR(bp, reg, val);
221 return val;
222}
223
224static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
225{
226 u32 val = REG_RD(bp, reg);
227
228 val &= ~bits;
229 REG_WR(bp, reg, val);
230 return val;
231}
232
233static void bnx2x_emac_init(struct link_params *params,
234 struct link_vars *vars)
235{
236 /* reset and unreset the emac core */
237 struct bnx2x *bp = params->bp;
238 u8 port = params->port;
239 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
240 u32 val;
241 u16 timeout;
242
243 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
244 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
245 udelay(5);
246 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
247 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
248
249 /* init emac - use read-modify-write */
250 /* self clear reset */
251 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
252 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
253
254 timeout = 200;
255 do {
256 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
257 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
258 if (!timeout) {
259 DP(NETIF_MSG_LINK, "EMAC timeout!\n");
260 return;
261 }
262 timeout--;
263 } while (val & EMAC_MODE_RESET);
264
265 /* Set mac address */
266 val = ((params->mac_addr[0] << 8) |
267 params->mac_addr[1]);
268 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
269
270 val = ((params->mac_addr[2] << 24) |
271 (params->mac_addr[3] << 16) |
272 (params->mac_addr[4] << 8) |
273 params->mac_addr[5]);
274 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
275}
276
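The MAC-match programming in bnx2x_emac_init() above splits the station address across two registers (bytes 0-1 in the first, bytes 2-5 in the second). A stand-alone sketch of that packing with a made-up address:

#include <stdio.h>

int main(void)
{
	/* made-up station address; same packing as bnx2x_emac_init() above */
	unsigned char mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	unsigned int hi = (mac[0] << 8) | mac[1];
	unsigned int lo = ((unsigned int)mac[2] << 24) | (mac[3] << 16) |
			  (mac[4] << 8) | mac[5];

	printf("EMAC_MAC_MATCH = 0x%04x, EMAC_MAC_MATCH+4 = 0x%08x\n", hi, lo);
	return 0;
}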
277static u8 bnx2x_emac_enable(struct link_params *params,
278 struct link_vars *vars, u8 lb)
279{
280 struct bnx2x *bp = params->bp;
281 u8 port = params->port;
282 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
283 u32 val;
284
285 DP(NETIF_MSG_LINK, "enabling EMAC\n");
286
287 /* enable emac and not bmac */
288 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
289
 290 /* for palladium */
291 if (CHIP_REV_IS_EMUL(bp)) {
292 /* Use lane 1 (of lanes 0-3) */
293 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
294 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
295 port*4, 1);
296 }
297 /* for fpga */
298 else
299
300 if (CHIP_REV_IS_FPGA(bp)) {
301 /* Use lane 1 (of lanes 0-3) */
302 DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
303
304 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
305 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
306 0);
307 } else
308 /* ASIC */
309 if (vars->phy_flags & PHY_XGXS_FLAG) {
310 u32 ser_lane = ((params->lane_config &
311 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
312 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
313
314 DP(NETIF_MSG_LINK, "XGXS\n");
315 /* select the master lanes (out of 0-3) */
316 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 +
317 port*4, ser_lane);
318 /* select XGXS */
319 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
320 port*4, 1);
321
322 } else { /* SerDes */
323 DP(NETIF_MSG_LINK, "SerDes\n");
324 /* select SerDes */
325 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
326 port*4, 0);
327 }
328
329 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
330 EMAC_RX_MODE_RESET);
331 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
332 EMAC_TX_MODE_RESET);
333
334 if (CHIP_REV_IS_SLOW(bp)) {
335 /* config GMII mode */
336 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
337 EMAC_WR(bp, EMAC_REG_EMAC_MODE,
338 (val | EMAC_MODE_PORT_GMII));
339 } else { /* ASIC */
340 /* pause enable/disable */
341 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
342 EMAC_RX_MODE_FLOW_EN);
343 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
344 bnx2x_bits_en(bp, emac_base +
345 EMAC_REG_EMAC_RX_MODE,
346 EMAC_RX_MODE_FLOW_EN);
347
348 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
349 (EMAC_TX_MODE_EXT_PAUSE_EN |
350 EMAC_TX_MODE_FLOW_EN));
351 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
352 bnx2x_bits_en(bp, emac_base +
353 EMAC_REG_EMAC_TX_MODE,
354 (EMAC_TX_MODE_EXT_PAUSE_EN |
355 EMAC_TX_MODE_FLOW_EN));
356 }
357
358 /* KEEP_VLAN_TAG, promiscuous */
359 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
360 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
361 EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
362
363 /* Set Loopback */
364 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
365 if (lb)
366 val |= 0x810;
367 else
368 val &= ~0x810;
369 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
370
371 /* enable emac */
372 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
373
374 /* enable emac for jumbo packets */
375 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
376 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
377 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
378
379 /* strip CRC */
380 REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
381
382 /* disable the NIG in/out to the bmac */
383 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
384 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
385 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
386
387 /* enable the NIG in/out to the emac */
388 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
389 val = 0;
390 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
391 val = 1;
392
393 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
394 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
395
396 if (CHIP_REV_IS_EMUL(bp)) {
397 /* take the BigMac out of reset */
398 REG_WR(bp,
399 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
400 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
401
402 /* enable access for bmac registers */
403 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
404 } else
405 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
406
407 vars->mac_type = MAC_TYPE_EMAC;
408 return 0;
409}
410
411
412
413static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
414 u8 is_lb)
415{
416 struct bnx2x *bp = params->bp;
417 u8 port = params->port;
418 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
419 NIG_REG_INGRESS_BMAC0_MEM;
420 u32 wb_data[2];
421 u32 val;
422
423 DP(NETIF_MSG_LINK, "Enabling BigMAC\n");
424 /* reset and unreset the BigMac */
425 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
426 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
427 msleep(1);
428
429 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
430 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
431
432 /* enable access for bmac registers */
433 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
434
435 /* XGXS control */
436 wb_data[0] = 0x3c;
437 wb_data[1] = 0;
438 REG_WR_DMAE(bp, bmac_addr +
439 BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
440 wb_data, 2);
441
442 /* tx MAC SA */
443 wb_data[0] = ((params->mac_addr[2] << 24) |
444 (params->mac_addr[3] << 16) |
445 (params->mac_addr[4] << 8) |
446 params->mac_addr[5]);
447 wb_data[1] = ((params->mac_addr[0] << 8) |
448 params->mac_addr[1]);
449 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
450 wb_data, 2);
451
452 /* tx control */
453 val = 0xc0;
454 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
455 val |= 0x800000;
456 wb_data[0] = val;
457 wb_data[1] = 0;
458 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL,
459 wb_data, 2);
460
461 /* mac control */
462 val = 0x3;
463 if (is_lb) {
464 val |= 0x4;
465 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
466 }
467 wb_data[0] = val;
468 wb_data[1] = 0;
469 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
470 wb_data, 2);
471
472 /* set rx mtu */
473 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
474 wb_data[1] = 0;
475 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE,
476 wb_data, 2);
477
478 /* rx control set to don't strip crc */
479 val = 0x14;
480 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
481 val |= 0x20;
482 wb_data[0] = val;
483 wb_data[1] = 0;
484 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL,
485 wb_data, 2);
486
487 /* set tx mtu */
488 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
489 wb_data[1] = 0;
490 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE,
491 wb_data, 2);
492
493 /* set cnt max size */
494 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
495 wb_data[1] = 0;
496 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
497 wb_data, 2);
498
499 /* configure safc */
500 wb_data[0] = 0x1000200;
501 wb_data[1] = 0;
502 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
503 wb_data, 2);
504 /* fix for emulation */
505 if (CHIP_REV_IS_EMUL(bp)) {
506 wb_data[0] = 0xf000;
507 wb_data[1] = 0;
508 REG_WR_DMAE(bp,
509 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
510 wb_data, 2);
511 }
512
513 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
514 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
515 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
516 val = 0;
517 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
518 val = 1;
519 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
520 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
521 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
522 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
523 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
524 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
525
526 vars->mac_type = MAC_TYPE_BMAC;
527 return 0;
528}
529
530static void bnx2x_phy_deassert(struct link_params *params, u8 phy_flags)
531{
532 struct bnx2x *bp = params->bp;
533 u32 val;
534
535 if (phy_flags & PHY_XGXS_FLAG) {
536 DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:XGXS\n");
537 val = XGXS_RESET_BITS;
538
539 } else { /* SerDes */
540 DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:SerDes\n");
541 val = SERDES_RESET_BITS;
542 }
543
544 val = val << (params->port*16);
545
546 /* reset and unreset the SerDes/XGXS */
547 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
548 val);
549 udelay(500);
550 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET,
551 val);
552 bnx2x_set_phy_mdio(params, phy_flags);
553}
554
555void bnx2x_link_status_update(struct link_params *params,
556 struct link_vars *vars)
557{
558 struct bnx2x *bp = params->bp;
559 u8 link_10g;
560 u8 port = params->port;
561
562 if (params->switch_cfg == SWITCH_CFG_1G)
563 vars->phy_flags = PHY_SERDES_FLAG;
564 else
565 vars->phy_flags = PHY_XGXS_FLAG;
566 vars->link_status = REG_RD(bp, params->shmem_base +
567 offsetof(struct shmem_region,
568 port_mb[port].link_status));
569
570 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
571
572 if (vars->link_up) {
573 DP(NETIF_MSG_LINK, "phy link up\n");
574
575 vars->phy_link_up = 1;
576 vars->duplex = DUPLEX_FULL;
577 switch (vars->link_status &
578 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
579 case LINK_10THD:
580 vars->duplex = DUPLEX_HALF;
581 /* fall thru */
582 case LINK_10TFD:
583 vars->line_speed = SPEED_10;
584 break;
585
586 case LINK_100TXHD:
587 vars->duplex = DUPLEX_HALF;
588 /* fall thru */
589 case LINK_100T4:
590 case LINK_100TXFD:
591 vars->line_speed = SPEED_100;
592 break;
593
594 case LINK_1000THD:
595 vars->duplex = DUPLEX_HALF;
596 /* fall thru */
597 case LINK_1000TFD:
598 vars->line_speed = SPEED_1000;
599 break;
600
601 case LINK_2500THD:
602 vars->duplex = DUPLEX_HALF;
603 /* fall thru */
604 case LINK_2500TFD:
605 vars->line_speed = SPEED_2500;
606 break;
607
608 case LINK_10GTFD:
609 vars->line_speed = SPEED_10000;
610 break;
611
612 case LINK_12GTFD:
613 vars->line_speed = SPEED_12000;
614 break;
615
616 case LINK_12_5GTFD:
617 vars->line_speed = SPEED_12500;
618 break;
619
620 case LINK_13GTFD:
621 vars->line_speed = SPEED_13000;
622 break;
623
624 case LINK_15GTFD:
625 vars->line_speed = SPEED_15000;
626 break;
627
628 case LINK_16GTFD:
629 vars->line_speed = SPEED_16000;
630 break;
631
632 default:
633 break;
634 }
635
636 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
637 vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
638 else
639 vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_TX;
640
641 if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
642 vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
643 else
644 vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_RX;
645
646 if (vars->phy_flags & PHY_XGXS_FLAG) {
647 if (vars->line_speed &&
648 ((vars->line_speed == SPEED_10) ||
649 (vars->line_speed == SPEED_100))) {
650 vars->phy_flags |= PHY_SGMII_FLAG;
651 } else {
652 vars->phy_flags &= ~PHY_SGMII_FLAG;
653 }
654 }
655
656 /* anything 10 and over uses the bmac */
657 link_10g = ((vars->line_speed == SPEED_10000) ||
658 (vars->line_speed == SPEED_12000) ||
659 (vars->line_speed == SPEED_12500) ||
660 (vars->line_speed == SPEED_13000) ||
661 (vars->line_speed == SPEED_15000) ||
662 (vars->line_speed == SPEED_16000));
663 if (link_10g)
664 vars->mac_type = MAC_TYPE_BMAC;
665 else
666 vars->mac_type = MAC_TYPE_EMAC;
667
668 } else { /* link down */
669 DP(NETIF_MSG_LINK, "phy link down\n");
670
671 vars->phy_link_up = 0;
672
673 vars->line_speed = 0;
674 vars->duplex = DUPLEX_FULL;
675 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
676
677 /* indicate no mac active */
678 vars->mac_type = MAC_TYPE_NONE;
679 }
680
681 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n",
682 vars->link_status, vars->phy_link_up);
683 DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
684 vars->line_speed, vars->duplex, vars->flow_ctrl);
685}
686
687static void bnx2x_update_mng(struct link_params *params, u32 link_status)
688{
689 struct bnx2x *bp = params->bp;
690
691 REG_WR(bp, params->shmem_base +
692 offsetof(struct shmem_region,
693 port_mb[params->port].link_status),
694 link_status);
695}
696
697static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
698{
699 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
700 NIG_REG_INGRESS_BMAC0_MEM;
701 u32 wb_data[2];
702 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
703
704 /* Only if the bmac is out of reset */
705 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
706 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
707 nig_bmac_enable) {
708
709 /* Clear Rx Enable bit in BMAC_CONTROL register */
710 REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
711 wb_data, 2);
712 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
713 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
714 wb_data, 2);
715
716 msleep(1);
717 }
718}
719
720static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
721 u32 line_speed)
722{
723 struct bnx2x *bp = params->bp;
724 u8 port = params->port;
725 u32 init_crd, crd;
726 u32 count = 1000;
727
728 /* disable port */
729 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
730
731 /* wait for init credit */
732 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
733 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
734 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
735
736 while ((init_crd != crd) && count) {
737 msleep(5);
738
739 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
740 count--;
741 }
742 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
743 if (init_crd != crd) {
744 DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n",
745 init_crd, crd);
746 return -EINVAL;
747 }
748
749 if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
750 line_speed == SPEED_10 ||
751 line_speed == SPEED_100 ||
752 line_speed == SPEED_1000 ||
753 line_speed == SPEED_2500) {
754 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
755 /* update threshold */
756 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
757 /* update init credit */
758 init_crd = 778; /* (800-18-4) */
759
760 } else {
761 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
762 ETH_OVREHEAD)/16;
763 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
764 /* update threshold */
765 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
766 /* update init credit */
767 switch (line_speed) {
768 case SPEED_10000:
769 init_crd = thresh + 553 - 22;
770 break;
771
772 case SPEED_12000:
773 init_crd = thresh + 664 - 22;
774 break;
775
776 case SPEED_13000:
777 init_crd = thresh + 742 - 22;
778 break;
779
780 case SPEED_16000:
781 init_crd = thresh + 778 - 22;
782 break;
783 default:
784 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
785 line_speed);
786 return -EINVAL;
787 }
788 }
789 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
790 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
791 line_speed, init_crd);
792
793 /* probe the credit changes */
794 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
795 msleep(5);
796 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
797
798 /* enable port */
799 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
800 return 0;
801}
802
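As a worked example of the credit arithmetic in bnx2x_pbf_update() above, replaying the same integer math with the ETH_* constants defined at the top of this file gives the 10G threshold and init credit below:

#include <stdio.h>

#define ETH_HLEN 14
#define ETH_OVREHEAD (ETH_HLEN + 8)		/* mirrors the define above */
#define ETH_MAX_JUMBO_PACKET_SIZE 9600

int main(void)
{
	/* same integer math as the 10G branch of bnx2x_pbf_update() */
	unsigned int thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD) / 16;
	unsigned int init_crd = thresh + 553 - 22;

	printf("thresh %u, 10G init_crd %u\n", thresh, init_crd);	/* 601, 1132 */
	return 0;
}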
803static u32 bnx2x_get_emac_base(struct bnx2x *bp, u32 ext_phy_type, u8 port)
804{
805 u32 emac_base;
806
807 switch (ext_phy_type) {
808 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
809 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
810 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
811 /* All MDC/MDIO is directed through single EMAC */
812 if (REG_RD(bp, NIG_REG_PORT_SWAP))
813 emac_base = GRCBASE_EMAC0;
814 else
815 emac_base = GRCBASE_EMAC1;
816 break;
817 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
818 emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
819 break;
820 default:
821 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
822 break;
823 }
824 return emac_base;
825
826}
827
828u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
829 u8 phy_addr, u8 devad, u16 reg, u16 val)
830{
831 u32 tmp, saved_mode;
832 u8 i, rc = 0;
833 u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port);
834
835 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
836 * (a value of 49==0x31) and make sure that the AUTO poll is off
837 */
838
839 saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
840 tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL |
841 EMAC_MDIO_MODE_CLOCK_CNT);
842 tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
843 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
844 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
845 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
846 udelay(40);
847
848 /* address */
849
850 tmp = ((phy_addr << 21) | (devad << 16) | reg |
851 EMAC_MDIO_COMM_COMMAND_ADDRESS |
852 EMAC_MDIO_COMM_START_BUSY);
853 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
854
855 for (i = 0; i < 50; i++) {
856 udelay(10);
857
858 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
859 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
860 udelay(5);
861 break;
862 }
863 }
864 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
865 DP(NETIF_MSG_LINK, "write phy register failed\n");
866 rc = -EFAULT;
867 } else {
868 /* data */
869 tmp = ((phy_addr << 21) | (devad << 16) | val |
870 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
871 EMAC_MDIO_COMM_START_BUSY);
872 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
873
874 for (i = 0; i < 50; i++) {
875 udelay(10);
876
877 tmp = REG_RD(bp, mdio_ctrl +
878 EMAC_REG_EMAC_MDIO_COMM);
879 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
880 udelay(5);
881 break;
882 }
883 }
884 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
885 DP(NETIF_MSG_LINK, "write phy register failed\n");
886 rc = -EFAULT;
887 }
888 }
889
890 /* Restore the saved mode */
891 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
892
893 return rc;
894}
895
896u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
897 u8 phy_addr, u8 devad, u16 reg, u16 *ret_val)
898{
899 u32 val, saved_mode;
900 u16 i;
901 u8 rc = 0;
902
903 u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port);
904 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
905 * (a value of 49==0x31) and make sure that the AUTO poll is off
906 */
907
908 saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
 909 val = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL |
 910 EMAC_MDIO_MODE_CLOCK_CNT);
911 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
912 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
913 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
914 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
915 udelay(40);
916
917 /* address */
918 val = ((phy_addr << 21) | (devad << 16) | reg |
919 EMAC_MDIO_COMM_COMMAND_ADDRESS |
920 EMAC_MDIO_COMM_START_BUSY);
921 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
922
923 for (i = 0; i < 50; i++) {
924 udelay(10);
925
926 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
927 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
928 udelay(5);
929 break;
930 }
931 }
932 if (val & EMAC_MDIO_COMM_START_BUSY) {
933 DP(NETIF_MSG_LINK, "read phy register failed\n");
934
935 *ret_val = 0;
936 rc = -EFAULT;
937
938 } else {
939 /* data */
940 val = ((phy_addr << 21) | (devad << 16) |
941 EMAC_MDIO_COMM_COMMAND_READ_45 |
942 EMAC_MDIO_COMM_START_BUSY);
943 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
944
945 for (i = 0; i < 50; i++) {
946 udelay(10);
947
948 val = REG_RD(bp, mdio_ctrl +
949 EMAC_REG_EMAC_MDIO_COMM);
950 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
951 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
952 break;
953 }
954 }
955 if (val & EMAC_MDIO_COMM_START_BUSY) {
956 DP(NETIF_MSG_LINK, "read phy register failed\n");
957
958 *ret_val = 0;
959 rc = -EFAULT;
960 }
961 }
962
963 /* Restore the saved mode */
964 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
965
966 return rc;
967}
968
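Both clause-45 helpers above compose their EMAC_REG_EMAC_MDIO_COMM words the same way: PHY address in bits 25:21, MMD device address in bits 20:16, register (or data) in the low 16 bits, plus command and START_BUSY flags whose exact values live in bnx2x_reg.h and are therefore left out of this illustrative sketch (the phy/devad/reg values are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int phy_addr = 0x01, devad = 0x05, reg = 0x0002;
	/* same packing as bnx2x_cl45_write()/bnx2x_cl45_read() above,
	 * minus the COMMAND and START_BUSY bits */
	unsigned int cmd = (phy_addr << 21) | (devad << 16) | reg;

	printf("MDIO_COMM word (without command bits) = 0x%08x\n", cmd);
	return 0;
}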
969static void bnx2x_set_aer_mmd(struct link_params *params,
970 struct link_vars *vars)
971{
972 struct bnx2x *bp = params->bp;
973 u32 ser_lane;
974 u16 offset;
975
976 ser_lane = ((params->lane_config &
977 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
978 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
979
980 offset = (vars->phy_flags & PHY_XGXS_FLAG) ?
981 (params->phy_addr + ser_lane) : 0;
982
983 CL45_WR_OVER_CL22(bp, params->port,
984 params->phy_addr,
985 MDIO_REG_BANK_AER_BLOCK,
986 MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
987}
988
989static void bnx2x_set_master_ln(struct link_params *params)
990{
991 struct bnx2x *bp = params->bp;
992 u16 new_master_ln, ser_lane;
993 ser_lane = ((params->lane_config &
994 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
995 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
996
997 /* set the master_ln for AN */
998 CL45_RD_OVER_CL22(bp, params->port,
999 params->phy_addr,
1000 MDIO_REG_BANK_XGXS_BLOCK2,
1001 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1002 &new_master_ln);
1003
1004 CL45_WR_OVER_CL22(bp, params->port,
1005 params->phy_addr,
1006 MDIO_REG_BANK_XGXS_BLOCK2 ,
1007 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1008 (new_master_ln | ser_lane));
1009}
1010
1011static u8 bnx2x_reset_unicore(struct link_params *params)
1012{
1013 struct bnx2x *bp = params->bp;
1014 u16 mii_control;
1015 u16 i;
1016
1017 CL45_RD_OVER_CL22(bp, params->port,
1018 params->phy_addr,
1019 MDIO_REG_BANK_COMBO_IEEE0,
1020 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1021
1022 /* reset the unicore */
1023 CL45_WR_OVER_CL22(bp, params->port,
1024 params->phy_addr,
1025 MDIO_REG_BANK_COMBO_IEEE0,
1026 MDIO_COMBO_IEEE0_MII_CONTROL,
1027 (mii_control |
1028 MDIO_COMBO_IEEO_MII_CONTROL_RESET));
1029 if (params->switch_cfg == SWITCH_CFG_1G)
1030 bnx2x_set_serdes_access(params);
1031
1032 /* wait for the reset to self clear */
1033 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
1034 udelay(5);
1035
1036 /* the reset erased the previous bank value */
1037 CL45_RD_OVER_CL22(bp, params->port,
1038 params->phy_addr,
1039 MDIO_REG_BANK_COMBO_IEEE0,
1040 MDIO_COMBO_IEEE0_MII_CONTROL,
1041 &mii_control);
1042
1043 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
1044 udelay(5);
1045 return 0;
1046 }
1047 }
1048
1049 DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
1050 return -EINVAL;
1051
1052}
1053
1054static void bnx2x_set_swap_lanes(struct link_params *params)
1055{
1056 struct bnx2x *bp = params->bp;
 1057 /* Each two bits represents a lane number:
 1058 No swap is 0123 => 0x1b, so no need to enable the swap */
1059 u16 ser_lane, rx_lane_swap, tx_lane_swap;
1060
1061 ser_lane = ((params->lane_config &
1062 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
1063 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
1064 rx_lane_swap = ((params->lane_config &
1065 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
1066 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
1067 tx_lane_swap = ((params->lane_config &
1068 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
1069 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
1070
1071 if (rx_lane_swap != 0x1b) {
1072 CL45_WR_OVER_CL22(bp, params->port,
1073 params->phy_addr,
1074 MDIO_REG_BANK_XGXS_BLOCK2,
1075 MDIO_XGXS_BLOCK2_RX_LN_SWAP,
1076 (rx_lane_swap |
1077 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
1078 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
1079 } else {
1080 CL45_WR_OVER_CL22(bp, params->port,
1081 params->phy_addr,
1082 MDIO_REG_BANK_XGXS_BLOCK2,
1083 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
1084 }
1085
1086 if (tx_lane_swap != 0x1b) {
1087 CL45_WR_OVER_CL22(bp, params->port,
1088 params->phy_addr,
1089 MDIO_REG_BANK_XGXS_BLOCK2,
1090 MDIO_XGXS_BLOCK2_TX_LN_SWAP,
1091 (tx_lane_swap |
1092 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
1093 } else {
1094 CL45_WR_OVER_CL22(bp, params->port,
1095 params->phy_addr,
1096 MDIO_REG_BANK_XGXS_BLOCK2,
1097 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
1098 }
1099}
1100
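The "No swap is 0123 => 0x1b" remark in bnx2x_set_swap_lanes() above falls out of packing the identity lane order two bits per lane, most-significant field first; a one-line check:

#include <stdio.h>

int main(void)
{
	unsigned int map = 0, lane;

	/* pack lanes 0,1,2,3 two bits each, most-significant field first:
	 * 00 01 10 11 == 0x1b, i.e. the "no swap" value */
	for (lane = 0; lane < 4; lane++)
		map = (map << 2) | lane;

	printf("no-swap lane map = 0x%x\n", map);
	return 0;
}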
1101static void bnx2x_set_parallel_detection(struct link_params *params,
1102 u8 phy_flags)
1103{
1104 struct bnx2x *bp = params->bp;
1105 u16 control2;
1106
1107 CL45_RD_OVER_CL22(bp, params->port,
1108 params->phy_addr,
1109 MDIO_REG_BANK_SERDES_DIGITAL,
1110 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1111 &control2);
1112 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1113 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1114 else
1115 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1116 DP(NETIF_MSG_LINK, "params->speed_cap_mask = 0x%x, control2 = 0x%x\n",
1117 params->speed_cap_mask, control2);
1118 CL45_WR_OVER_CL22(bp, params->port,
1119 params->phy_addr,
1120 MDIO_REG_BANK_SERDES_DIGITAL,
1121 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1122 control2);
1123
1124 if ((phy_flags & PHY_XGXS_FLAG) &&
1125 (params->speed_cap_mask &
1126 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
1127 DP(NETIF_MSG_LINK, "XGXS\n");
1128
1129 CL45_WR_OVER_CL22(bp, params->port,
1130 params->phy_addr,
1131 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1132 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
1133 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
1134
1135 CL45_RD_OVER_CL22(bp, params->port,
1136 params->phy_addr,
1137 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1138 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1139 &control2);
1140
1141
1142 control2 |=
1143 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
1144
1145 CL45_WR_OVER_CL22(bp, params->port,
1146 params->phy_addr,
1147 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1148 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1149 control2);
1150
1151 /* Disable parallel detection of HiG */
1152 CL45_WR_OVER_CL22(bp, params->port,
1153 params->phy_addr,
1154 MDIO_REG_BANK_XGXS_BLOCK2,
1155 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
1156 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
1157 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
1158 }
1159}
1160
1161static void bnx2x_set_autoneg(struct link_params *params,
1162 struct link_vars *vars,
1163 u8 enable_cl73)
1164{
1165 struct bnx2x *bp = params->bp;
1166 u16 reg_val;
1167
1168 /* CL37 Autoneg */
1169
1170 CL45_RD_OVER_CL22(bp, params->port,
1171 params->phy_addr,
1172 MDIO_REG_BANK_COMBO_IEEE0,
1173 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1174
1175 /* CL37 Autoneg Enabled */
1176 if (vars->line_speed == SPEED_AUTO_NEG)
1177 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
1178 else /* CL37 Autoneg Disabled */
1179 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1180 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
1181
1182 CL45_WR_OVER_CL22(bp, params->port,
1183 params->phy_addr,
1184 MDIO_REG_BANK_COMBO_IEEE0,
1185 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1186
1187 /* Enable/Disable Autodetection */
1188
1189 CL45_RD_OVER_CL22(bp, params->port,
1190 params->phy_addr,
1191 MDIO_REG_BANK_SERDES_DIGITAL,
1192 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
1193 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
1194 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
1195 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
1196 if (vars->line_speed == SPEED_AUTO_NEG)
1197 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1198 else
1199 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1200
1201 CL45_WR_OVER_CL22(bp, params->port,
1202 params->phy_addr,
1203 MDIO_REG_BANK_SERDES_DIGITAL,
1204 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
1205
1206 /* Enable TetonII and BAM autoneg */
1207 CL45_RD_OVER_CL22(bp, params->port,
1208 params->phy_addr,
1209 MDIO_REG_BANK_BAM_NEXT_PAGE,
1210 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1211 &reg_val);
1212 if (vars->line_speed == SPEED_AUTO_NEG) {
1213 /* Enable BAM aneg Mode and TetonII aneg Mode */
1214 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1215 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
1216 } else {
1217 /* TetonII and BAM Autoneg Disabled */
1218 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1219 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
1220 }
1221 CL45_WR_OVER_CL22(bp, params->port,
1222 params->phy_addr,
1223 MDIO_REG_BANK_BAM_NEXT_PAGE,
1224 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1225 reg_val);
1226
1227 if (enable_cl73) {
1228 /* Enable Cl73 FSM status bits */
1229 CL45_WR_OVER_CL22(bp, params->port,
1230 params->phy_addr,
1231 MDIO_REG_BANK_CL73_USERB0,
1232 MDIO_CL73_USERB0_CL73_UCTRL,
1233 0xe);
1234
1235 /* Enable BAM Station Manager*/
1236 CL45_WR_OVER_CL22(bp, params->port,
1237 params->phy_addr,
1238 MDIO_REG_BANK_CL73_USERB0,
1239 MDIO_CL73_USERB0_CL73_BAM_CTRL1,
1240 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
1241 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
1242 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
1243
1244 /* Advertise CL73 link speeds */
1245 CL45_RD_OVER_CL22(bp, params->port,
1246 params->phy_addr,
1247 MDIO_REG_BANK_CL73_IEEEB1,
1248 MDIO_CL73_IEEEB1_AN_ADV2,
1249 &reg_val);
1250 if (params->speed_cap_mask &
1251 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
1252 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
1253 if (params->speed_cap_mask &
1254 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1255 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
1256
1257 CL45_WR_OVER_CL22(bp, params->port,
1258 params->phy_addr,
1259 MDIO_REG_BANK_CL73_IEEEB1,
1260 MDIO_CL73_IEEEB1_AN_ADV2,
1261 reg_val);
1262
1263 /* CL73 Autoneg Enabled */
1264 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
1265
1266 } else /* CL73 Autoneg Disabled */
1267 reg_val = 0;
1268
1269 CL45_WR_OVER_CL22(bp, params->port,
1270 params->phy_addr,
1271 MDIO_REG_BANK_CL73_IEEEB0,
1272 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
1273}
1274
1275/* program SerDes, forced speed */
1276static void bnx2x_program_serdes(struct link_params *params,
1277 struct link_vars *vars)
1278{
1279 struct bnx2x *bp = params->bp;
1280 u16 reg_val;
1281
1282 /* program duplex, disable autoneg and sgmii*/
1283 CL45_RD_OVER_CL22(bp, params->port,
1284 params->phy_addr,
1285 MDIO_REG_BANK_COMBO_IEEE0,
1286 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1287 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
1288 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1289 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
1290 if (params->req_duplex == DUPLEX_FULL)
1291 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
1292 CL45_WR_OVER_CL22(bp, params->port,
1293 params->phy_addr,
1294 MDIO_REG_BANK_COMBO_IEEE0,
1295 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1296
1297 /* program speed
1298 - needed only if the speed is greater than 1G (2.5G or 10G) */
1299 CL45_RD_OVER_CL22(bp, params->port,
1300 params->phy_addr,
1301 MDIO_REG_BANK_SERDES_DIGITAL,
1302 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
1303 /* clearing the speed value before setting the right speed */
1304 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
1305
1306 reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
1307 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
1308
1309 if (!((vars->line_speed == SPEED_1000) ||
1310 (vars->line_speed == SPEED_100) ||
1311 (vars->line_speed == SPEED_10))) {
1312
1313 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
1314 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
1315 if (vars->line_speed == SPEED_10000)
1316 reg_val |=
1317 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
1318 if (vars->line_speed == SPEED_13000)
1319 reg_val |=
1320 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
1321 }
1322
1323 CL45_WR_OVER_CL22(bp, params->port,
1324 params->phy_addr,
1325 MDIO_REG_BANK_SERDES_DIGITAL,
1326 MDIO_SERDES_DIGITAL_MISC1, reg_val);
1327
1328}
1329
1330static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
1331{
1332 struct bnx2x *bp = params->bp;
1333 u16 val = 0;
1334
1335 /* configure the 48 bits for BAM AN */
1336
1337 /* set extended capabilities */
1338 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
1339 val |= MDIO_OVER_1G_UP1_2_5G;
1340 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
1341 val |= MDIO_OVER_1G_UP1_10G;
1342 CL45_WR_OVER_CL22(bp, params->port,
1343 params->phy_addr,
1344 MDIO_REG_BANK_OVER_1G,
1345 MDIO_OVER_1G_UP1, val);
1346
1347 CL45_WR_OVER_CL22(bp, params->port,
1348 params->phy_addr,
1349 MDIO_REG_BANK_OVER_1G,
1350 MDIO_OVER_1G_UP3, 0x400);
1351}
1352
1353static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
1354{
1355 struct bnx2x *bp = params->bp;
1356 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1357 /* resolve pause mode and advertisement
1358 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
1359
1360 switch (params->req_flow_ctrl) {
1361 case BNX2X_FLOW_CTRL_AUTO:
1362 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
1363 *ieee_fc |=
1364 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
1365 } else {
1366 *ieee_fc |=
1367 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1368 }
1369 break;
1370 case BNX2X_FLOW_CTRL_TX:
1371 *ieee_fc |=
1372 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1373 break;
1374
1375 case BNX2X_FLOW_CTRL_RX:
1376 case BNX2X_FLOW_CTRL_BOTH:
1377 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
1378 break;
1379
1380 case BNX2X_FLOW_CTRL_NONE:
1381 default:
1382 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
1383 break;
1384 }
1385 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
1386}
1387
1388static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
1389 u16 ieee_fc)
1390{
1391 struct bnx2x *bp = params->bp;
1392 u16 val;
1393 /* for AN, we are always publishing full duplex */
1394
1395 CL45_WR_OVER_CL22(bp, params->port,
1396 params->phy_addr,
1397 MDIO_REG_BANK_COMBO_IEEE0,
1398 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
1399 CL45_RD_OVER_CL22(bp, params->port,
1400 params->phy_addr,
1401 MDIO_REG_BANK_CL73_IEEEB1,
1402 MDIO_CL73_IEEEB1_AN_ADV1, &val);
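	/* Copy the CL37-format pause bits from ieee_fc into the CL73 ADV1
	 * pause field; the <<3 shift aligns them with the CL73 bit positions
	 */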
1403 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
1404 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
1405 CL45_WR_OVER_CL22(bp, params->port,
1406 params->phy_addr,
1407 MDIO_REG_BANK_CL73_IEEEB1,
1408 MDIO_CL73_IEEEB1_AN_ADV1, val);
1409}
1410
1411static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
1412{
1413 struct bnx2x *bp = params->bp;
1414 u16 mii_control;
1415
1416 DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n");
1417 /* Enable and restart BAM/CL37 aneg */
1418
1419 if (enable_cl73) {
1420 CL45_RD_OVER_CL22(bp, params->port,
1421 params->phy_addr,
1422 MDIO_REG_BANK_CL73_IEEEB0,
1423 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1424 &mii_control);
1425
1426 CL45_WR_OVER_CL22(bp, params->port,
1427 params->phy_addr,
1428 MDIO_REG_BANK_CL73_IEEEB0,
1429 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1430 (mii_control |
1431 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
1432 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
1433 } else {
1434
1435 CL45_RD_OVER_CL22(bp, params->port,
1436 params->phy_addr,
1437 MDIO_REG_BANK_COMBO_IEEE0,
1438 MDIO_COMBO_IEEE0_MII_CONTROL,
1439 &mii_control);
1440 DP(NETIF_MSG_LINK,
1441 "bnx2x_restart_autoneg mii_control before = 0x%x\n",
1442 mii_control);
1443 CL45_WR_OVER_CL22(bp, params->port,
1444 params->phy_addr,
1445 MDIO_REG_BANK_COMBO_IEEE0,
1446 MDIO_COMBO_IEEE0_MII_CONTROL,
1447 (mii_control |
1448 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1449 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
1450 }
1451}
1452
1453static void bnx2x_initialize_sgmii_process(struct link_params *params,
1454 struct link_vars *vars)
1455{
1456 struct bnx2x *bp = params->bp;
1457 u16 control1;
1458
1459 /* in SGMII mode, the unicore is always slave */
1460
1461 CL45_RD_OVER_CL22(bp, params->port,
1462 params->phy_addr,
1463 MDIO_REG_BANK_SERDES_DIGITAL,
1464 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
1465 &control1);
1466 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
1467 /* set sgmii mode (and not fiber) */
1468 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
1469 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
1470 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
1471 CL45_WR_OVER_CL22(bp, params->port,
1472 params->phy_addr,
1473 MDIO_REG_BANK_SERDES_DIGITAL,
1474 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
1475 control1);
1476
1477 /* if forced speed */
1478 if (!(vars->line_speed == SPEED_AUTO_NEG)) {
1479 /* set speed, disable autoneg */
1480 u16 mii_control;
1481
1482 CL45_RD_OVER_CL22(bp, params->port,
1483 params->phy_addr,
1484 MDIO_REG_BANK_COMBO_IEEE0,
1485 MDIO_COMBO_IEEE0_MII_CONTROL,
1486 &mii_control);
1487 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1488 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
1489 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
1490
1491 switch (vars->line_speed) {
1492 case SPEED_100:
1493 mii_control |=
1494 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
1495 break;
1496 case SPEED_1000:
1497 mii_control |=
1498 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
1499 break;
1500 case SPEED_10:
1501 /* there is nothing to set for 10M */
1502 break;
1503 default:
1504 /* invalid speed for SGMII */
1505 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
1506 vars->line_speed);
1507 break;
1508 }
1509
1510 /* setting the full duplex */
1511 if (params->req_duplex == DUPLEX_FULL)
1512 mii_control |=
1513 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
1514 CL45_WR_OVER_CL22(bp, params->port,
1515 params->phy_addr,
1516 MDIO_REG_BANK_COMBO_IEEE0,
1517 MDIO_COMBO_IEEE0_MII_CONTROL,
1518 mii_control);
1519
1520 } else { /* AN mode */
1521 /* enable and restart AN */
1522 bnx2x_restart_autoneg(params, 0);
1523 }
1524}
1525
1526
1527/*
1528 * link management
1529 */
1530
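/* Resolve the negotiated flow control from a 4-bit pause_result:
 * bits [3:2] hold the local ASM_DIR/PAUSE advertisement and bits [1:0]
 * the link partner's (see the LD/LP table in the switch below)
 */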
1531static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
1532{ /* LD LP */
1533 switch (pause_result) { /* ASYM P ASYM P */
1534 case 0xb: /* 1 0 1 1 */
1535 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
1536 break;
1537
1538 case 0xe: /* 1 1 1 0 */
1539 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
1540 break;
1541
1542 case 0x5: /* 0 1 0 1 */
1543 case 0x7: /* 0 1 1 1 */
1544 case 0xd: /* 1 1 0 1 */
1545 case 0xf: /* 1 1 1 1 */
1546 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
1547 break;
1548
1549 default:
1550 break;
1551 }
1552}
1553
1554static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
1555 struct link_vars *vars)
1556{
1557 struct bnx2x *bp = params->bp;
1558 u8 ext_phy_addr;
1559 u16 ld_pause; /* local */
1560 u16 lp_pause; /* link partner */
1561 u16 an_complete; /* AN complete */
1562 u16 pause_result;
1563 u8 ret = 0;
1564 u32 ext_phy_type;
1565 u8 port = params->port;
1566 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
1567 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
1568 /* read twice */
1569
1570 bnx2x_cl45_read(bp, port,
1571 ext_phy_type,
1572 ext_phy_addr,
1573 MDIO_AN_DEVAD,
1574 MDIO_AN_REG_STATUS, &an_complete);
1575 bnx2x_cl45_read(bp, port,
1576 ext_phy_type,
1577 ext_phy_addr,
1578 MDIO_AN_DEVAD,
1579 MDIO_AN_REG_STATUS, &an_complete);
1580
1581 if (an_complete & MDIO_AN_REG_STATUS_AN_COMPLETE) {
1582 ret = 1;
1583 bnx2x_cl45_read(bp, port,
1584 ext_phy_type,
1585 ext_phy_addr,
1586 MDIO_AN_DEVAD,
1587 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
1588 bnx2x_cl45_read(bp, port,
1589 ext_phy_type,
1590 ext_phy_addr,
1591 MDIO_AN_DEVAD,
1592 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
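		/* Pack the local and link-partner pause advertisement bits into
		 * the 4-bit pause_result consumed by bnx2x_pause_resolve()
		 */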
1593 pause_result = (ld_pause &
1594 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
1595 pause_result |= (lp_pause &
1596 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
1597 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
1598 pause_result);
1599 bnx2x_pause_resolve(vars, pause_result);
1600 if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
1601 ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
1602 bnx2x_cl45_read(bp, port,
1603 ext_phy_type,
1604 ext_phy_addr,
1605 MDIO_AN_DEVAD,
1606 MDIO_AN_REG_CL37_FC_LD, &ld_pause);
1607
1608 bnx2x_cl45_read(bp, port,
1609 ext_phy_type,
1610 ext_phy_addr,
1611 MDIO_AN_DEVAD,
1612 MDIO_AN_REG_CL37_FC_LP, &lp_pause);
1613 pause_result = (ld_pause &
1614 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
1615 pause_result |= (lp_pause &
1616 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
1617
1618 bnx2x_pause_resolve(vars, pause_result);
1619 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
1620 pause_result);
1621 }
1622 }
1623 return ret;
1624}
1625
1626static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
1627{
1628 struct bnx2x *bp = params->bp;
1629 u16 pd_10g, status2_1000x;
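	/* Read the 1000X status register twice; as with the "read twice"
	 * pattern used elsewhere in this file, the first read may return
	 * stale latched status bits
	 */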
1630 CL45_RD_OVER_CL22(bp, params->port,
1631 params->phy_addr,
1632 MDIO_REG_BANK_SERDES_DIGITAL,
1633 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1634 &status2_1000x);
1635 CL45_RD_OVER_CL22(bp, params->port,
1636 params->phy_addr,
1637 MDIO_REG_BANK_SERDES_DIGITAL,
1638 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1639 &status2_1000x);
1640 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
1641 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
1642 params->port);
1643 return 1;
1644 }
1645
1646 CL45_RD_OVER_CL22(bp, params->port,
1647 params->phy_addr,
1648 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1649 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
1650 &pd_10g);
1651
1652 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
1653 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
1654 params->port);
1655 return 1;
1656 }
1657 return 0;
1658}
1659
1660static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1661 struct link_vars *vars,
1662 u32 gp_status)
1663{
1664 struct bnx2x *bp = params->bp;
1665 u16 ld_pause; /* local driver */
1666 u16 lp_pause; /* link partner */
1667 u16 pause_result;
1668
1669 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1670
1671 /* resolve from gp_status in case of AN complete and not sgmii */
1672 if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
1673 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1674 (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
1675 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1676 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
1677 if (bnx2x_direct_parallel_detect_used(params)) {
1678 vars->flow_ctrl = params->req_fc_auto_adv;
1679 return;
1680 }
1681 if ((gp_status &
1682 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
1683 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
1684 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
1685 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
1686
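			/* CL73 autoneg completed; take the pause bits from the
			 * CL73 advertisement registers, otherwise fall back to
			 * the CL37 (combo IEEE0) registers below
			 */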
1687 CL45_RD_OVER_CL22(bp, params->port,
1688 params->phy_addr,
1689 MDIO_REG_BANK_CL73_IEEEB1,
1690 MDIO_CL73_IEEEB1_AN_ADV1,
1691 &ld_pause);
1692 CL45_RD_OVER_CL22(bp, params->port,
1693 params->phy_addr,
1694 MDIO_REG_BANK_CL73_IEEEB1,
1695 MDIO_CL73_IEEEB1_AN_LP_ADV1,
1696 &lp_pause);
1697 pause_result = (ld_pause &
1698 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
1699 >> 8;
1700 pause_result |= (lp_pause &
1701 MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
1702 >> 10;
1703 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
1704 pause_result);
1705 } else {
1706
1707 CL45_RD_OVER_CL22(bp, params->port,
1708 params->phy_addr,
1709 MDIO_REG_BANK_COMBO_IEEE0,
1710 MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1711 &ld_pause);
1712 CL45_RD_OVER_CL22(bp, params->port,
1713 params->phy_addr,
1714 MDIO_REG_BANK_COMBO_IEEE0,
1715 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1716 &lp_pause);
1717 pause_result = (ld_pause &
1718 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
1719 pause_result |= (lp_pause &
1720 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1721 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
1722 pause_result);
1723 }
1724 bnx2x_pause_resolve(vars, pause_result);
1725 } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
1726 (bnx2x_ext_phy_resolve_fc(params, vars))) {
1727 return;
1728 } else {
1729 if (params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
1730 vars->flow_ctrl = params->req_fc_auto_adv;
1731 else
1732 vars->flow_ctrl = params->req_flow_ctrl;
1733 }
1734 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
1735}
1736
1737static void bnx2x_check_fallback_to_cl37(struct link_params *params)
1738{
1739 struct bnx2x *bp = params->bp;
 1740	u16 rx_status, ustat_val, cl37_fsm_received;
1741 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
1742 /* Step 1: Make sure signal is detected */
1743 CL45_RD_OVER_CL22(bp, params->port,
1744 params->phy_addr,
1745 MDIO_REG_BANK_RX0,
1746 MDIO_RX0_RX_STATUS,
1747 &rx_status);
1748 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
1749 (MDIO_RX0_RX_STATUS_SIGDET)) {
 1750	DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73. "
1751 "rx_status(0x80b0) = 0x%x\n", rx_status);
1752 CL45_WR_OVER_CL22(bp, params->port,
1753 params->phy_addr,
1754 MDIO_REG_BANK_CL73_IEEEB0,
1755 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1756 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
1757 return;
1758 }
1759 /* Step 2: Check CL73 state machine */
1760 CL45_RD_OVER_CL22(bp, params->port,
1761 params->phy_addr,
1762 MDIO_REG_BANK_CL73_USERB0,
1763 MDIO_CL73_USERB0_CL73_USTAT1,
1764 &ustat_val);
1765 if ((ustat_val &
1766 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
1767 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
1768 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
1769 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
1770 DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. "
1771 "ustat_val(0x8371) = 0x%x\n", ustat_val);
1772 return;
1773 }
1774 /* Step 3: Check CL37 Message Pages received to indicate LP
1775 supports only CL37 */
1776 CL45_RD_OVER_CL22(bp, params->port,
1777 params->phy_addr,
1778 MDIO_REG_BANK_REMOTE_PHY,
1779 MDIO_REMOTE_PHY_MISC_RX_STATUS,
 1780	&cl37_fsm_received);
 1781	if ((cl37_fsm_received &
1782 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
1783 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
1784 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
1785 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
 1786	DP(NETIF_MSG_LINK, "No CL37 FSM messages were received. "
 1787	"misc_rx_status(0x8330) = 0x%x\n",
 1788	cl37_fsm_received);
1789 return;
1790 }
 1791	/* The combined CL37/CL73 FSM state information indicates that we are
 1792	   connected to a device which does not support CL73, but does support
 1793	   CL37 BAM. In this case we disable CL73 and restart CL37 auto-neg */
1794 /* Disable CL73 */
1795 CL45_WR_OVER_CL22(bp, params->port,
1796 params->phy_addr,
1797 MDIO_REG_BANK_CL73_IEEEB0,
1798 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1799 0);
1800 /* Restart CL37 autoneg */
1801 bnx2x_restart_autoneg(params, 0);
1802 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
1803}
1804static u8 bnx2x_link_settings_status(struct link_params *params,
1805 struct link_vars *vars,
1806 u32 gp_status,
1807 u8 ext_phy_link_up)
1808{
1809 struct bnx2x *bp = params->bp;
1810 u16 new_line_speed;
1811 u8 rc = 0;
1812 vars->link_status = 0;
1813
1814 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
1815 DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n",
1816 gp_status);
1817
1818 vars->phy_link_up = 1;
1819 vars->link_status |= LINK_STATUS_LINK_UP;
1820
1821 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
1822 vars->duplex = DUPLEX_FULL;
1823 else
1824 vars->duplex = DUPLEX_HALF;
1825
1826 bnx2x_flow_ctrl_resolve(params, vars, gp_status);
1827
1828 switch (gp_status & GP_STATUS_SPEED_MASK) {
1829 case GP_STATUS_10M:
1830 new_line_speed = SPEED_10;
1831 if (vars->duplex == DUPLEX_FULL)
1832 vars->link_status |= LINK_10TFD;
1833 else
1834 vars->link_status |= LINK_10THD;
1835 break;
1836
1837 case GP_STATUS_100M:
1838 new_line_speed = SPEED_100;
1839 if (vars->duplex == DUPLEX_FULL)
1840 vars->link_status |= LINK_100TXFD;
1841 else
1842 vars->link_status |= LINK_100TXHD;
1843 break;
1844
1845 case GP_STATUS_1G:
1846 case GP_STATUS_1G_KX:
1847 new_line_speed = SPEED_1000;
1848 if (vars->duplex == DUPLEX_FULL)
1849 vars->link_status |= LINK_1000TFD;
1850 else
1851 vars->link_status |= LINK_1000THD;
1852 break;
1853
1854 case GP_STATUS_2_5G:
1855 new_line_speed = SPEED_2500;
1856 if (vars->duplex == DUPLEX_FULL)
1857 vars->link_status |= LINK_2500TFD;
1858 else
1859 vars->link_status |= LINK_2500THD;
1860 break;
1861
1862 case GP_STATUS_5G:
1863 case GP_STATUS_6G:
1864 DP(NETIF_MSG_LINK,
1865 "link speed unsupported gp_status 0x%x\n",
1866 gp_status);
1867 return -EINVAL;
1868
1869 case GP_STATUS_10G_KX4:
1870 case GP_STATUS_10G_HIG:
1871 case GP_STATUS_10G_CX4:
1872 new_line_speed = SPEED_10000;
1873 vars->link_status |= LINK_10GTFD;
1874 break;
1875
1876 case GP_STATUS_12G_HIG:
1877 new_line_speed = SPEED_12000;
1878 vars->link_status |= LINK_12GTFD;
1879 break;
1880
1881 case GP_STATUS_12_5G:
1882 new_line_speed = SPEED_12500;
1883 vars->link_status |= LINK_12_5GTFD;
1884 break;
1885
1886 case GP_STATUS_13G:
1887 new_line_speed = SPEED_13000;
1888 vars->link_status |= LINK_13GTFD;
1889 break;
1890
1891 case GP_STATUS_15G:
1892 new_line_speed = SPEED_15000;
1893 vars->link_status |= LINK_15GTFD;
1894 break;
1895
1896 case GP_STATUS_16G:
1897 new_line_speed = SPEED_16000;
1898 vars->link_status |= LINK_16GTFD;
1899 break;
1900
1901 default:
1902 DP(NETIF_MSG_LINK,
1903 "link speed unsupported gp_status 0x%x\n",
1904 gp_status);
1905 return -EINVAL;
1906 }
1907
 1908	/* Upon link speed change set the NIG into drain mode.
 1909	   This deals with a possible FIFO glitch due to a clock change
 1910	   when speed is decreased without a link-down indication */
1911 if (new_line_speed != vars->line_speed) {
1912 if (XGXS_EXT_PHY_TYPE(params->ext_phy_config) !=
1913 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT &&
1914 ext_phy_link_up) {
1915 DP(NETIF_MSG_LINK, "Internal link speed %d is"
1916 " different than the external"
1917 " link speed %d\n", new_line_speed,
1918 vars->line_speed);
1919 vars->phy_link_up = 0;
1920 return 0;
1921 }
1922 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
1923 + params->port*4, 0);
1924 msleep(1);
1925 }
1926 vars->line_speed = new_line_speed;
1927 vars->link_status |= LINK_STATUS_SERDES_LINK;
1928
1929 if ((params->req_line_speed == SPEED_AUTO_NEG) &&
1930 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1931 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
1932 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1933 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
1934 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1935 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
1936 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1937 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) {
1938 vars->autoneg = AUTO_NEG_ENABLED;
1939
1940 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
1941 vars->autoneg |= AUTO_NEG_COMPLETE;
1942 vars->link_status |=
1943 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1944 }
1945
1946 vars->autoneg |= AUTO_NEG_PARALLEL_DETECTION_USED;
1947 vars->link_status |=
1948 LINK_STATUS_PARALLEL_DETECTION_USED;
1949
1950 }
1951 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
1952 vars->link_status |=
1953 LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1954
1955 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
1956 vars->link_status |=
1957 LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1958
1959 } else { /* link_down */
1960 DP(NETIF_MSG_LINK, "phy link down\n");
1961
1962 vars->phy_link_up = 0;
1963
1964 vars->duplex = DUPLEX_FULL;
1965 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1966 vars->autoneg = AUTO_NEG_DISABLED;
1967 vars->mac_type = MAC_TYPE_NONE;
1968
1969 if ((params->req_line_speed == SPEED_AUTO_NEG) &&
1970 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1971 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT))) {
1972 /* Check signal is detected */
1973 bnx2x_check_fallback_to_cl37(params);
1974 }
1975 }
1976
1977 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
1978 gp_status, vars->phy_link_up, vars->line_speed);
1979 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x"
1980 " autoneg 0x%x\n",
1981 vars->duplex,
1982 vars->flow_ctrl, vars->autoneg);
1983 DP(NETIF_MSG_LINK, "link_status 0x%x\n", vars->link_status);
1984
1985 return rc;
1986}
1987
1988static void bnx2x_set_gmii_tx_driver(struct link_params *params)
1989{
1990 struct bnx2x *bp = params->bp;
1991 u16 lp_up2;
1992 u16 tx_driver;
1993 u16 bank;
1994
1995 /* read precomp */
1996 CL45_RD_OVER_CL22(bp, params->port,
1997 params->phy_addr,
1998 MDIO_REG_BANK_OVER_1G,
1999 MDIO_OVER_1G_LP_UP2, &lp_up2);
2000
2001 /* bits [10:7] at lp_up2, positioned at [15:12] */
2002 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2003 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2004 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2005
2006 if (lp_up2 == 0)
2007 return;
2008
2009 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
2010 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
2011 CL45_RD_OVER_CL22(bp, params->port,
2012 params->phy_addr,
2013 bank,
2014 MDIO_TX0_TX_DRIVER, &tx_driver);
2015
2016 /* replace tx_driver bits [15:12] */
2017 if (lp_up2 !=
2018 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
2019 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2020 tx_driver |= lp_up2;
2021 CL45_WR_OVER_CL22(bp, params->port,
2022 params->phy_addr,
2023 bank,
2024 MDIO_TX0_TX_DRIVER, tx_driver);
2025 }
2026 }
2027}
2028
2029static u8 bnx2x_emac_program(struct link_params *params,
2030 u32 line_speed, u32 duplex)
2031{
2032 struct bnx2x *bp = params->bp;
2033 u8 port = params->port;
2034 u16 mode = 0;
2035
2036 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2037 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
2038 EMAC_REG_EMAC_MODE,
2039 (EMAC_MODE_25G_MODE |
2040 EMAC_MODE_PORT_MII_10M |
2041 EMAC_MODE_HALF_DUPLEX));
2042 switch (line_speed) {
2043 case SPEED_10:
2044 mode |= EMAC_MODE_PORT_MII_10M;
2045 break;
2046
2047 case SPEED_100:
2048 mode |= EMAC_MODE_PORT_MII;
2049 break;
2050
2051 case SPEED_1000:
2052 mode |= EMAC_MODE_PORT_GMII;
2053 break;
2054
2055 case SPEED_2500:
2056 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2057 break;
2058
2059 default:
2060 /* 10G not valid for EMAC */
2061 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", line_speed);
2062 return -EINVAL;
2063 }
2064
2065 if (duplex == DUPLEX_HALF)
2066 mode |= EMAC_MODE_HALF_DUPLEX;
2067 bnx2x_bits_en(bp,
2068 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2069 mode);
2070
2071 bnx2x_set_led(params, LED_MODE_OPER, line_speed);
2072 return 0;
2073}
2074
2075/*****************************************************************************/
2076/* External Phy section */
2077/*****************************************************************************/
2078void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
2079{
2080 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2081 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2082 msleep(1);
2083 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2084 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
2085}
2086
2087static void bnx2x_ext_phy_reset(struct link_params *params,
2088 struct link_vars *vars)
2089{
2090 struct bnx2x *bp = params->bp;
2091 u32 ext_phy_type;
2092 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2093
2094 DP(NETIF_MSG_LINK, "Port %x: bnx2x_ext_phy_reset\n", params->port);
2095 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
 2096	/* The PHY reset is controlled by GPIO 1
2097 * Give it 1ms of reset pulse
2098 */
2099 if (vars->phy_flags & PHY_XGXS_FLAG) {
2100
2101 switch (ext_phy_type) {
2102 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2103 DP(NETIF_MSG_LINK, "XGXS Direct\n");
2104 break;
2105
2106 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2107 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2108 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
2109
2110 /* Restore normal power mode*/
2111 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2112 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2113 params->port);
2114
2115 /* HW reset */
2116 bnx2x_ext_phy_hw_reset(bp, params->port);
2117
2118 bnx2x_cl45_write(bp, params->port,
2119 ext_phy_type,
2120 ext_phy_addr,
2121 MDIO_PMA_DEVAD,
2122 MDIO_PMA_REG_CTRL, 0xa040);
2123 break;
2124
2125 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2126 break;
2127
2128 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
2129
2130 /* Restore normal power mode*/
2131 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2132 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2133 params->port);
2134
2135 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2136 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2137 params->port);
2138
2139 bnx2x_cl45_write(bp, params->port,
2140 ext_phy_type,
2141 ext_phy_addr,
2142 MDIO_PMA_DEVAD,
2143 MDIO_PMA_REG_CTRL,
2144 1<<15);
2145 break;
2146
2147 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2148 DP(NETIF_MSG_LINK, "XGXS 8072\n");
2149
2150 /* Unset Low Power Mode and SW reset */
2151 /* Restore normal power mode*/
2152 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2153 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2154 params->port);
2155
2156 bnx2x_cl45_write(bp, params->port,
2157 ext_phy_type,
2158 ext_phy_addr,
2159 MDIO_PMA_DEVAD,
2160 MDIO_PMA_REG_CTRL,
2161 1<<15);
2162 break;
2163
2164 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
2165 DP(NETIF_MSG_LINK, "XGXS 8073\n");
2166
2167 /* Restore normal power mode*/
2168 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2169 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2170 params->port);
2171
2172 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2173 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2174 params->port);
2175 break;
2176
2177 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2178 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
2179
2180 /* Restore normal power mode*/
2181 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2182 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2183 params->port);
2184
2185 /* HW reset */
2186 bnx2x_ext_phy_hw_reset(bp, params->port);
2187 break;
2188
2189 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
2190 /* Restore normal power mode*/
2191 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2192 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2193 params->port);
2194
2195 /* HW reset */
2196 bnx2x_ext_phy_hw_reset(bp, params->port);
2197
2198 bnx2x_cl45_write(bp, params->port,
2199 ext_phy_type,
2200 ext_phy_addr,
2201 MDIO_PMA_DEVAD,
2202 MDIO_PMA_REG_CTRL,
2203 1<<15);
2204 break;
2205 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
2206 break;
2207 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
2208 DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
2209 break;
2210
2211 default:
2212 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2213 params->ext_phy_config);
2214 break;
2215 }
2216
2217 } else { /* SerDes */
2218 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
2219 switch (ext_phy_type) {
2220 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2221 DP(NETIF_MSG_LINK, "SerDes Direct\n");
2222 break;
2223
2224 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2225 DP(NETIF_MSG_LINK, "SerDes 5482\n");
2226 bnx2x_ext_phy_hw_reset(bp, params->port);
2227 break;
2228
2229 default:
2230 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2231 params->ext_phy_config);
2232 break;
2233 }
2234 }
2235}
2236
2237static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
2238 u32 shmem_base, u32 spirom_ver)
2239{
2240 DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
2241 (u16)(spirom_ver>>16), (u16)spirom_ver, port);
2242 REG_WR(bp, shmem_base +
2243 offsetof(struct shmem_region,
2244 port_mb[port].ext_phy_fw_version),
2245 spirom_ver);
2246}
2247
2248static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, u8 port,
2249 u32 ext_phy_type, u8 ext_phy_addr,
2250 u32 shmem_base)
2251{
2252 u16 fw_ver1, fw_ver2;
2253
2254 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD,
2255 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
2256 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD,
2257 MDIO_PMA_REG_ROM_VER2, &fw_ver2);
2258 bnx2x_save_spirom_version(bp, port, shmem_base,
2259 (u32)(fw_ver1<<16 | fw_ver2));
2260}
2261
2262
2263static void bnx2x_save_8481_spirom_version(struct bnx2x *bp, u8 port,
2264 u8 ext_phy_addr, u32 shmem_base)
2265{
2266 u16 val, fw_ver1, fw_ver2, cnt;
 2267	/* For the 32-bit registers in the 8481, access via the MDIO2ARM interface. */
2268 /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
2269 bnx2x_cl45_write(bp, port,
2270 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2271 ext_phy_addr, MDIO_PMA_DEVAD,
2272 0xA819, 0x0014);
2273 bnx2x_cl45_write(bp, port,
2274 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2275 ext_phy_addr,
2276 MDIO_PMA_DEVAD,
2277 0xA81A,
2278 0xc200);
2279 bnx2x_cl45_write(bp, port,
2280 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2281 ext_phy_addr,
2282 MDIO_PMA_DEVAD,
2283 0xA81B,
2284 0x0000);
2285 bnx2x_cl45_write(bp, port,
2286 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2287 ext_phy_addr,
2288 MDIO_PMA_DEVAD,
2289 0xA81C,
2290 0x0300);
2291 bnx2x_cl45_write(bp, port,
2292 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2293 ext_phy_addr,
2294 MDIO_PMA_DEVAD,
2295 0xA817,
2296 0x0009);
2297
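	/* Poll register 0xA818 until bit 0 is set (command done), for up to
	 * ~500us (100 iterations x 5us)
	 */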
2298 for (cnt = 0; cnt < 100; cnt++) {
2299 bnx2x_cl45_read(bp, port,
2300 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2301 ext_phy_addr,
2302 MDIO_PMA_DEVAD,
2303 0xA818,
2304 &val);
2305 if (val & 1)
2306 break;
2307 udelay(5);
2308 }
2309 if (cnt == 100) {
2310 DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(1)\n");
2311 bnx2x_save_spirom_version(bp, port,
2312 shmem_base, 0);
2313 return;
2314 }
2315
2316
2317 /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
2318 bnx2x_cl45_write(bp, port,
2319 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2320 ext_phy_addr, MDIO_PMA_DEVAD,
2321 0xA819, 0x0000);
2322 bnx2x_cl45_write(bp, port,
2323 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2324 ext_phy_addr, MDIO_PMA_DEVAD,
2325 0xA81A, 0xc200);
2326 bnx2x_cl45_write(bp, port,
2327 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2328 ext_phy_addr, MDIO_PMA_DEVAD,
2329 0xA817, 0x000A);
2330 for (cnt = 0; cnt < 100; cnt++) {
2331 bnx2x_cl45_read(bp, port,
2332 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2333 ext_phy_addr,
2334 MDIO_PMA_DEVAD,
2335 0xA818,
2336 &val);
2337 if (val & 1)
2338 break;
2339 udelay(5);
2340 }
2341 if (cnt == 100) {
2342 DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(2)\n");
2343 bnx2x_save_spirom_version(bp, port,
2344 shmem_base, 0);
2345 return;
2346 }
2347
2348 /* lower 16 bits of the register SPI_FW_STATUS */
2349 bnx2x_cl45_read(bp, port,
2350 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2351 ext_phy_addr,
2352 MDIO_PMA_DEVAD,
2353 0xA81B,
2354 &fw_ver1);
2355 /* upper 16 bits of register SPI_FW_STATUS */
2356 bnx2x_cl45_read(bp, port,
2357 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2358 ext_phy_addr,
2359 MDIO_PMA_DEVAD,
2360 0xA81C,
2361 &fw_ver2);
2362
2363 bnx2x_save_spirom_version(bp, port,
2364 shmem_base, (fw_ver2<<16) | fw_ver1);
2365}
2366
2367static void bnx2x_bcm8072_external_rom_boot(struct link_params *params)
2368{
2369 struct bnx2x *bp = params->bp;
2370 u8 port = params->port;
2371 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2372 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2373
2374 /* Need to wait 200ms after reset */
2375 msleep(200);
2376 /* Boot port from external ROM
2377 * Set ser_boot_ctl bit in the MISC_CTRL1 register
2378 */
2379 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2380 MDIO_PMA_DEVAD,
2381 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2382
2383 /* Reset internal microprocessor */
2384 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2385 MDIO_PMA_DEVAD,
2386 MDIO_PMA_REG_GEN_CTRL,
2387 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2388 /* set micro reset = 0 */
2389 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2390 MDIO_PMA_DEVAD,
2391 MDIO_PMA_REG_GEN_CTRL,
2392 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2393 /* Reset internal microprocessor */
2394 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2395 MDIO_PMA_DEVAD,
2396 MDIO_PMA_REG_GEN_CTRL,
2397 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2398 /* wait for 100ms for code download via SPI port */
2399 msleep(100);
2400
2401 /* Clear ser_boot_ctl bit */
2402 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2403 MDIO_PMA_DEVAD,
2404 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2405 /* Wait 100ms */
2406 msleep(100);
2407
2408 bnx2x_save_bcm_spirom_ver(bp, port,
2409 ext_phy_type,
2410 ext_phy_addr,
2411 params->shmem_base);
2412}
2413
2414static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
2415{
 2416	/* This is required only for 8073 A1, version 102 */
2417
2418 struct bnx2x *bp = params->bp;
2419 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2420 u16 val;
2421
2422 /* Read 8073 HW revision*/
2423 bnx2x_cl45_read(bp, params->port,
2424 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2425 ext_phy_addr,
2426 MDIO_PMA_DEVAD,
2427 MDIO_PMA_REG_8073_CHIP_REV, &val);
2428
2429 if (val != 1) {
2430 /* No need to workaround in 8073 A1 */
2431 return 0;
2432 }
2433
2434 bnx2x_cl45_read(bp, params->port,
2435 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2436 ext_phy_addr,
2437 MDIO_PMA_DEVAD,
2438 MDIO_PMA_REG_ROM_VER2, &val);
2439
2440 /* SNR should be applied only for version 0x102 */
2441 if (val != 0x102)
2442 return 0;
2443
2444 return 1;
2445}
2446
2447static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
2448{
2449 struct bnx2x *bp = params->bp;
2450 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
 2451	u16 val, cnt, cnt1;
2452
2453 bnx2x_cl45_read(bp, params->port,
2454 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2455 ext_phy_addr,
2456 MDIO_PMA_DEVAD,
2457 MDIO_PMA_REG_8073_CHIP_REV, &val);
2458
2459 if (val > 0) {
2460 /* No need to workaround in 8073 A1 */
2461 return 0;
2462 }
2463 /* XAUI workaround in 8073 A0: */
2464
2465 /* After loading the boot ROM and restarting Autoneg,
2466 poll Dev1, Reg $C820: */
2467
2468 for (cnt = 0; cnt < 1000; cnt++) {
2469 bnx2x_cl45_read(bp, params->port,
2470 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2471 ext_phy_addr,
2472 MDIO_PMA_DEVAD,
2473 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
2474 &val);
2475 /* If bit [14] = 0 or bit [13] = 0, continue on with
2476 system initialization (XAUI work-around not required,
2477 as these bits indicate 2.5G or 1G link up). */
2478 if (!(val & (1<<14)) || !(val & (1<<13))) {
2479 DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
2480 return 0;
2481 } else if (!(val & (1<<15))) {
2482 DP(NETIF_MSG_LINK, "clc bit 15 went off\n");
2483 /* If bit 15 is 0, then poll Dev1, Reg $C841 until
 2484	its MSB (bit 15) goes to 1 (indicating that the
2485 XAUI workaround has completed),
2486 then continue on with system initialization.*/
2487 for (cnt1 = 0; cnt1 < 1000; cnt1++) {
2488 bnx2x_cl45_read(bp, params->port,
2489 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2490 ext_phy_addr,
2491 MDIO_PMA_DEVAD,
2492 MDIO_PMA_REG_8073_XAUI_WA, &val);
2493 if (val & (1<<15)) {
2494 DP(NETIF_MSG_LINK,
2495 "XAUI workaround has completed\n");
2496 return 0;
2497 }
2498 msleep(3);
2499 }
2500 break;
2501 }
2502 msleep(3);
2503 }
2504 DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
2505 return -EINVAL;
2506}
2507
2508static void bnx2x_bcm8073_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port,
2509 u8 ext_phy_addr,
2510 u32 ext_phy_type,
2511 u32 shmem_base)
2512{
2513 /* Boot port from external ROM */
2514 /* EDC grst */
2515 bnx2x_cl45_write(bp, port,
2516 ext_phy_type,
2517 ext_phy_addr,
2518 MDIO_PMA_DEVAD,
2519 MDIO_PMA_REG_GEN_CTRL,
2520 0x0001);
2521
2522 /* ucode reboot and rst */
2523 bnx2x_cl45_write(bp, port,
2524 ext_phy_type,
2525 ext_phy_addr,
2526 MDIO_PMA_DEVAD,
2527 MDIO_PMA_REG_GEN_CTRL,
2528 0x008c);
2529
2530 bnx2x_cl45_write(bp, port,
2531 ext_phy_type,
2532 ext_phy_addr,
2533 MDIO_PMA_DEVAD,
2534 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2535
2536 /* Reset internal microprocessor */
2537 bnx2x_cl45_write(bp, port,
2538 ext_phy_type,
2539 ext_phy_addr,
2540 MDIO_PMA_DEVAD,
2541 MDIO_PMA_REG_GEN_CTRL,
2542 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2543
2544 /* Release srst bit */
2545 bnx2x_cl45_write(bp, port,
2546 ext_phy_type,
2547 ext_phy_addr,
2548 MDIO_PMA_DEVAD,
2549 MDIO_PMA_REG_GEN_CTRL,
2550 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2551
2552 /* wait for 100ms for code download via SPI port */
2553 msleep(100);
2554
2555 /* Clear ser_boot_ctl bit */
2556 bnx2x_cl45_write(bp, port,
2557 ext_phy_type,
2558 ext_phy_addr,
2559 MDIO_PMA_DEVAD,
2560 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2561
2562 bnx2x_save_bcm_spirom_ver(bp, port,
2563 ext_phy_type,
2564 ext_phy_addr,
2565 shmem_base);
2566}
2567
2568static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port,
2569 u8 ext_phy_addr,
2570 u32 shmem_base)
2571{
2572 bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr,
2573 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2574 shmem_base);
2575}
2576
2577static void bnx2x_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port,
2578 u8 ext_phy_addr,
2579 u32 shmem_base)
2580{
2581 bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr,
2582 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
2583 shmem_base);
2584
2585}
2586
2587static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
2588{
2589 struct bnx2x *bp = params->bp;
2590 u8 port = params->port;
2591 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2592 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2593
2594 /* Need to wait 100ms after reset */
2595 msleep(100);
2596
2597 /* Micro controller re-boot */
2598 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2599 MDIO_PMA_DEVAD,
2600 MDIO_PMA_REG_GEN_CTRL,
2601 0x018B);
2602
2603 /* Set soft reset */
2604 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2605 MDIO_PMA_DEVAD,
2606 MDIO_PMA_REG_GEN_CTRL,
2607 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2608
2609 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2610 MDIO_PMA_DEVAD,
2611 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2612
2613 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2614 MDIO_PMA_DEVAD,
2615 MDIO_PMA_REG_GEN_CTRL,
2616 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2617
2618 /* wait for 150ms for microcode load */
2619 msleep(150);
2620
2621 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
2622 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2623 MDIO_PMA_DEVAD,
2624 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2625
2626 msleep(200);
2627 bnx2x_save_bcm_spirom_ver(bp, port,
2628 ext_phy_type,
2629 ext_phy_addr,
2630 params->shmem_base);
2631}
2632
2633static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, u8 port,
2634 u32 ext_phy_type, u8 ext_phy_addr,
2635 u8 tx_en)
2636{
2637 u16 val;
2638
2639 DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
2640 tx_en, port);
 2641	/* Disable/Enable the transmitter (the TX laser of the SFP+ module) */
2642 bnx2x_cl45_read(bp, port,
2643 ext_phy_type,
2644 ext_phy_addr,
2645 MDIO_PMA_DEVAD,
2646 MDIO_PMA_REG_PHY_IDENTIFIER,
2647 &val);
2648
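	/* Bit 15 of the PHY identifier register acts as the TX-disable
	 * control here: clearing it enables the laser, setting it disables it
	 */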
2649 if (tx_en)
2650 val &= ~(1<<15);
2651 else
2652 val |= (1<<15);
2653
2654 bnx2x_cl45_write(bp, port,
2655 ext_phy_type,
2656 ext_phy_addr,
2657 MDIO_PMA_DEVAD,
2658 MDIO_PMA_REG_PHY_IDENTIFIER,
2659 val);
2660}
2661
2662static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
2663 u16 addr, u8 byte_cnt, u8 *o_buf)
2664{
2665 struct bnx2x *bp = params->bp;
2666 u16 val = 0;
2667 u16 i;
2668 u8 port = params->port;
2669 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2670 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2671
2672 if (byte_cnt > 16) {
 2673	DP(NETIF_MSG_LINK, "Reading from eeprom is"
 2674	" limited to 16 bytes\n");
2675 return -EINVAL;
2676 }
2677 /* Set the read command byte count */
2678 bnx2x_cl45_write(bp, port,
2679 ext_phy_type,
2680 ext_phy_addr,
2681 MDIO_PMA_DEVAD,
2682 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
2683 (byte_cnt | 0xa000));
2684
2685 /* Set the read command address */
2686 bnx2x_cl45_write(bp, port,
2687 ext_phy_type,
2688 ext_phy_addr,
2689 MDIO_PMA_DEVAD,
2690 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
2691 addr);
2692
2693 /* Activate read command */
2694 bnx2x_cl45_write(bp, port,
2695 ext_phy_type,
2696 ext_phy_addr,
2697 MDIO_PMA_DEVAD,
2698 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2699 0x2c0f);
2700
2701 /* Wait up to 500us for command complete status */
2702 for (i = 0; i < 100; i++) {
2703 bnx2x_cl45_read(bp, port,
2704 ext_phy_type,
2705 ext_phy_addr,
2706 MDIO_PMA_DEVAD,
2707 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2708 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
2709 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
2710 break;
2711 udelay(5);
2712 }
2713
2714 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
2715 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
2716 DP(NETIF_MSG_LINK,
2717 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
2718 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
2719 return -EINVAL;
2720 }
2721
2722 /* Read the buffer */
2723 for (i = 0; i < byte_cnt; i++) {
2724 bnx2x_cl45_read(bp, port,
2725 ext_phy_type,
2726 ext_phy_addr,
2727 MDIO_PMA_DEVAD,
2728 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
2729 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
2730 }
2731
2732 for (i = 0; i < 100; i++) {
2733 bnx2x_cl45_read(bp, port,
2734 ext_phy_type,
2735 ext_phy_addr,
2736 MDIO_PMA_DEVAD,
2737 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2738 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
2739 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
 2740	return 0;
2741 msleep(1);
2742 }
2743 return -EINVAL;
2744}
2745
2746static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
2747 u16 addr, u8 byte_cnt, u8 *o_buf)
2748{
2749 struct bnx2x *bp = params->bp;
2750 u16 val, i;
2751 u8 port = params->port;
2752 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2753 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2754
2755 if (byte_cnt > 16) {
 2756	DP(NETIF_MSG_LINK, "Reading from eeprom is"
 2757	" limited to 16 bytes\n");
2758 return -EINVAL;
2759 }
2760
2761 /* Need to read from 1.8000 to clear it */
2762 bnx2x_cl45_read(bp, port,
2763 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
2764 ext_phy_addr,
2765 MDIO_PMA_DEVAD,
2766 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2767 &val);
2768
2769 /* Set the read command byte count */
2770 bnx2x_cl45_write(bp, port,
2771 ext_phy_type,
2772 ext_phy_addr,
2773 MDIO_PMA_DEVAD,
2774 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
2775 ((byte_cnt < 2) ? 2 : byte_cnt));
2776
2777 /* Set the read command address */
2778 bnx2x_cl45_write(bp, port,
2779 ext_phy_type,
2780 ext_phy_addr,
2781 MDIO_PMA_DEVAD,
2782 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
2783 addr);
2784 /* Set the destination address */
2785 bnx2x_cl45_write(bp, port,
2786 ext_phy_type,
2787 ext_phy_addr,
2788 MDIO_PMA_DEVAD,
2789 0x8004,
2790 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
2791
2792 /* Activate read command */
2793 bnx2x_cl45_write(bp, port,
2794 ext_phy_type,
2795 ext_phy_addr,
2796 MDIO_PMA_DEVAD,
2797 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2798 0x8002);
2799 /* Wait appropriate time for two-wire command to finish before
2800 polling the status register */
2801 msleep(1);
2802
2803 /* Wait up to 500us for command complete status */
2804 for (i = 0; i < 100; i++) {
2805 bnx2x_cl45_read(bp, port,
2806 ext_phy_type,
2807 ext_phy_addr,
2808 MDIO_PMA_DEVAD,
2809 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2810 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
2811 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
2812 break;
2813 udelay(5);
2814 }
2815
2816 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
2817 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
2818 DP(NETIF_MSG_LINK,
2819 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
2820 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
2821 return -EINVAL;
2822 }
2823
2824 /* Read the buffer */
2825 for (i = 0; i < byte_cnt; i++) {
2826 bnx2x_cl45_read(bp, port,
2827 ext_phy_type,
2828 ext_phy_addr,
2829 MDIO_PMA_DEVAD,
2830 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
2831 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
2832 }
2833
2834 for (i = 0; i < 100; i++) {
2835 bnx2x_cl45_read(bp, port,
2836 ext_phy_type,
2837 ext_phy_addr,
2838 MDIO_PMA_DEVAD,
2839 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2840 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
2841 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
 2842	return 0;
2843 msleep(1);
2844 }
2845
2846 return -EINVAL;
2847}
2848
2849u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr,
2850 u8 byte_cnt, u8 *o_buf)
2851{
2852 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2853
2854 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
2855 return bnx2x_8726_read_sfp_module_eeprom(params, addr,
2856 byte_cnt, o_buf);
2857 else if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
2858 return bnx2x_8727_read_sfp_module_eeprom(params, addr,
2859 byte_cnt, o_buf);
2860 return -EINVAL;
2861}
2862
2863static u8 bnx2x_get_edc_mode(struct link_params *params,
2864 u16 *edc_mode)
2865{
2866 struct bnx2x *bp = params->bp;
2867 u8 val, check_limiting_mode = 0;
2868 *edc_mode = EDC_MODE_LIMITING;
2869
2870 /* First check for copper cable */
2871 if (bnx2x_read_sfp_module_eeprom(params,
2872 SFP_EEPROM_CON_TYPE_ADDR,
2873 1,
2874 &val) != 0) {
2875 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
2876 return -EINVAL;
2877 }
2878
2879 switch (val) {
2880 case SFP_EEPROM_CON_TYPE_VAL_COPPER:
2881 {
2882 u8 copper_module_type;
2883
 2884	/* Check whether it is an active cable (includes SFP+ module)
 2885	   or a passive cable */
2886 if (bnx2x_read_sfp_module_eeprom(params,
2887 SFP_EEPROM_FC_TX_TECH_ADDR,
2888 1,
2889 &copper_module_type) !=
2890 0) {
2891 DP(NETIF_MSG_LINK,
2892 "Failed to read copper-cable-type"
2893 " from SFP+ EEPROM\n");
2894 return -EINVAL;
2895 }
2896
2897 if (copper_module_type &
2898 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
2899 DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
2900 check_limiting_mode = 1;
2901 } else if (copper_module_type &
2902 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
2903 DP(NETIF_MSG_LINK, "Passive Copper"
2904 " cable detected\n");
2905 *edc_mode =
2906 EDC_MODE_PASSIVE_DAC;
2907 } else {
2908 DP(NETIF_MSG_LINK, "Unknown copper-cable-"
2909 "type 0x%x !!!\n", copper_module_type);
2910 return -EINVAL;
2911 }
2912 break;
2913 }
2914 case SFP_EEPROM_CON_TYPE_VAL_LC:
2915 DP(NETIF_MSG_LINK, "Optic module detected\n");
2916 check_limiting_mode = 1;
2917 break;
2918 default:
2919 DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
2920 val);
2921 return -EINVAL;
2922 }
2923
2924 if (check_limiting_mode) {
2925 u8 options[SFP_EEPROM_OPTIONS_SIZE];
2926 if (bnx2x_read_sfp_module_eeprom(params,
2927 SFP_EEPROM_OPTIONS_ADDR,
2928 SFP_EEPROM_OPTIONS_SIZE,
2929 options) != 0) {
2930 DP(NETIF_MSG_LINK, "Failed to read Option"
2931 " field from module EEPROM\n");
2932 return -EINVAL;
2933 }
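		/* The SFP+ options field reports whether the receiver output is
		 * linear; the code selects linear EDC mode for such modules and
		 * limiting mode otherwise
		 */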
2934 if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK))
2935 *edc_mode = EDC_MODE_LINEAR;
2936 else
2937 *edc_mode = EDC_MODE_LIMITING;
2938 }
2939 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
2940 return 0;
2941}
2942
 2943	/* This function reads the relevant fields from the SFP+ module
 2944	   and verifies that it is compliant with this board */
2945static u8 bnx2x_verify_sfp_module(struct link_params *params)
2946{
2947 struct bnx2x *bp = params->bp;
2948 u32 val;
2949 u32 fw_resp;
2950 char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
2951 char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
2952
2953 val = REG_RD(bp, params->shmem_base +
2954 offsetof(struct shmem_region, dev_info.
2955 port_feature_config[params->port].config));
2956 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
2957 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) {
2958 DP(NETIF_MSG_LINK, "NOT enforcing module verification\n");
2959 return 0;
2960 }
2961
2962 /* Ask the FW to validate the module */
2963 if (!(params->feature_config_flags &
2964 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY)) {
2965 DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
2966 "verification\n");
2967 return -EINVAL;
2968 }
2969
2970 fw_resp = bnx2x_fw_command(bp, DRV_MSG_CODE_VRFY_OPT_MDL);
2971 if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
2972 DP(NETIF_MSG_LINK, "Approved module\n");
2973 return 0;
2974 }
2975
2976 /* format the warning message */
2977 if (bnx2x_read_sfp_module_eeprom(params,
2978 SFP_EEPROM_VENDOR_NAME_ADDR,
2979 SFP_EEPROM_VENDOR_NAME_SIZE,
2980 (u8 *)vendor_name))
2981 vendor_name[0] = '\0';
2982 else
2983 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
2984 if (bnx2x_read_sfp_module_eeprom(params,
2985 SFP_EEPROM_PART_NO_ADDR,
2986 SFP_EEPROM_PART_NO_SIZE,
2987 (u8 *)vendor_pn))
2988 vendor_pn[0] = '\0';
2989 else
2990 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
2991
2992 netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected, Port %d from %s part number %s\n",
2993 params->port, vendor_name, vendor_pn);
2994 return -EINVAL;
2995}
2996
2997static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
2998 u16 edc_mode)
2999{
3000 struct bnx2x *bp = params->bp;
3001 u8 port = params->port;
3002 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3003 u16 cur_limiting_mode;
3004
3005 bnx2x_cl45_read(bp, port,
3006 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3007 ext_phy_addr,
3008 MDIO_PMA_DEVAD,
3009 MDIO_PMA_REG_ROM_VER2,
3010 &cur_limiting_mode);
3011 DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
3012 cur_limiting_mode);
3013
3014 if (edc_mode == EDC_MODE_LIMITING) {
3015 DP(NETIF_MSG_LINK,
3016 "Setting LIMITING MODE\n");
3017 bnx2x_cl45_write(bp, port,
3018 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3019 ext_phy_addr,
3020 MDIO_PMA_DEVAD,
3021 MDIO_PMA_REG_ROM_VER2,
3022 EDC_MODE_LIMITING);
3023 } else { /* LRM mode ( default )*/
3024
3025 DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
3026
 3027	/* Changing to LRM mode takes quite a few seconds.
 3028	   So do it only if the current mode is limiting
 3029	   (the default is LRM) */
3030 if (cur_limiting_mode != EDC_MODE_LIMITING)
3031 return 0;
3032
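		/* Register write sequence that moves the 8726 out of limiting
		 * mode and back into LRM mode
		 */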
3033 bnx2x_cl45_write(bp, port,
3034 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3035 ext_phy_addr,
3036 MDIO_PMA_DEVAD,
3037 MDIO_PMA_REG_LRM_MODE,
3038 0);
3039 bnx2x_cl45_write(bp, port,
3040 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3041 ext_phy_addr,
3042 MDIO_PMA_DEVAD,
3043 MDIO_PMA_REG_ROM_VER2,
3044 0x128);
3045 bnx2x_cl45_write(bp, port,
3046 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3047 ext_phy_addr,
3048 MDIO_PMA_DEVAD,
3049 MDIO_PMA_REG_MISC_CTRL0,
3050 0x4008);
3051 bnx2x_cl45_write(bp, port,
3052 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3053 ext_phy_addr,
3054 MDIO_PMA_DEVAD,
3055 MDIO_PMA_REG_LRM_MODE,
3056 0xaaaa);
3057 }
3058 return 0;
3059}
3060
3061static u8 bnx2x_bcm8727_set_limiting_mode(struct link_params *params,
3062 u16 edc_mode)
3063{
3064 struct bnx2x *bp = params->bp;
3065 u8 port = params->port;
3066 u16 phy_identifier;
3067 u16 rom_ver2_val;
3068 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3069
3070 bnx2x_cl45_read(bp, port,
3071 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3072 ext_phy_addr,
3073 MDIO_PMA_DEVAD,
3074 MDIO_PMA_REG_PHY_IDENTIFIER,
3075 &phy_identifier);
3076
3077 bnx2x_cl45_write(bp, port,
3078 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3079 ext_phy_addr,
3080 MDIO_PMA_DEVAD,
3081 MDIO_PMA_REG_PHY_IDENTIFIER,
3082 (phy_identifier & ~(1<<9)));
3083
3084 bnx2x_cl45_read(bp, port,
3085 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3086 ext_phy_addr,
3087 MDIO_PMA_DEVAD,
3088 MDIO_PMA_REG_ROM_VER2,
3089 &rom_ver2_val);
3090 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
3091 bnx2x_cl45_write(bp, port,
3092 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3093 ext_phy_addr,
3094 MDIO_PMA_DEVAD,
3095 MDIO_PMA_REG_ROM_VER2,
3096 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
3097
3098 bnx2x_cl45_write(bp, port,
3099 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3100 ext_phy_addr,
3101 MDIO_PMA_DEVAD,
3102 MDIO_PMA_REG_PHY_IDENTIFIER,
3103 (phy_identifier | (1<<9)));
3104
3105 return 0;
3106}
3107
3108
3109static u8 bnx2x_wait_for_sfp_module_initialized(struct link_params *params)
3110{
3111 u8 val;
3112 struct bnx2x *bp = params->bp;
3113 u16 timeout;
 3114	/* Initialization time after hot-plug may take up to 300ms for some
 3115	   PHY types (e.g. JDSU) */
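	/* Poll byte 1 of the module EEPROM every 5ms, up to 60 times
	 * (300ms total), until the module responds
	 */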
3116 for (timeout = 0; timeout < 60; timeout++) {
3117 if (bnx2x_read_sfp_module_eeprom(params, 1, 1, &val)
3118 == 0) {
3119 DP(NETIF_MSG_LINK, "SFP+ module initialization "
3120 "took %d ms\n", timeout * 5);
3121 return 0;
3122 }
3123 msleep(5);
3124 }
3125 return -EINVAL;
3126}
3127
3128static void bnx2x_8727_power_module(struct bnx2x *bp,
3129 struct link_params *params,
3130 u8 ext_phy_addr, u8 is_power_up) {
 3131	/* Make sure the GPIOs are not used for LED mode */
3132 u16 val;
3133 u8 port = params->port;
3134 /*
 3135	 * In the GPIO register, bit 4 is used to determine whether the GPIOs
 3136	 * operate as INPUT or as OUTPUT. A value of 1 means input, 0 means
 3137	 * output.
 3138	 * Bits 0-1 hold the GPIO values for OUTPUT when bit 4 is 0.
 3139	 * Bits 8-9 hold the GPIO values for INPUT when bit 4 is 1,
 3140	 * where the 1st bit is the over-current indication (input only) and
 3141	 * the 2nd bit is for power (output only).
3142 */
3143
3144 /*
 3145	 * If the NOC feature is disabled and power is up, set the GPIO control
 3146	 * to input to enable listening for the over-current indication
3147 */
3148
3149 if (!(params->feature_config_flags &
3150 FEATURE_CONFIG_BCM8727_NOC) && is_power_up)
3151 val = (1<<4);
3152 else
3153 /*
3154 * Set GPIO control to OUTPUT, and set the power bit
 3155	 * according to is_power_up
3156 */
3157 val = ((!(is_power_up)) << 1);
3158
3159 bnx2x_cl45_write(bp, port,
3160 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3161 ext_phy_addr,
3162 MDIO_PMA_DEVAD,
3163 MDIO_PMA_REG_8727_GPIO_CTRL,
3164 val);
3165}
3166
3167static u8 bnx2x_sfp_module_detection(struct link_params *params)
3168{
3169 struct bnx2x *bp = params->bp;
3170 u16 edc_mode;
3171 u8 rc = 0;
3172 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3173 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3174 u32 val = REG_RD(bp, params->shmem_base +
3175 offsetof(struct shmem_region, dev_info.
3176 port_feature_config[params->port].config));
3177
3178 DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
3179 params->port);
3180
3181 if (bnx2x_get_edc_mode(params, &edc_mode) != 0) {
3182 DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
3183 return -EINVAL;
3184 } else if (bnx2x_verify_sfp_module(params) !=
3185 0) {
3186 /* check SFP+ module compatibility */
3187 DP(NETIF_MSG_LINK, "Module verification failed!!\n");
3188 rc = -EINVAL;
3189 /* Turn on fault module-detected led */
3190 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
3191 MISC_REGISTERS_GPIO_HIGH,
3192 params->port);
3193 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
3194 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
3195 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
3196 /* Shutdown SFP+ module */
3197 DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
3198 bnx2x_8727_power_module(bp, params,
3199 ext_phy_addr, 0);
3200 return rc;
3201 }
3202 } else {
3203 /* Turn off fault module-detected led */
3204 DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n");
3205 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
3206 MISC_REGISTERS_GPIO_LOW,
3207 params->port);
3208 }
3209
3210 /* power up the SFP module */
3211 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
3212 bnx2x_8727_power_module(bp, params, ext_phy_addr, 1);
3213
3214 /* Check and set limiting mode / LRM mode on 8726.
3215 On 8727 it is done automatically */
3216 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
3217 bnx2x_bcm8726_set_limiting_mode(params, edc_mode);
3218 else
3219 bnx2x_bcm8727_set_limiting_mode(params, edc_mode);
3220 /*
3221 * Enable transmit for this module if the module is approved, or
3222 * if unapproved modules should also enable the Tx laser
3223 */
3224 if (rc == 0 ||
3225 (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
3226 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
3227 bnx2x_sfp_set_transmitter(bp, params->port,
3228 ext_phy_type, ext_phy_addr, 1);
3229 else
3230 bnx2x_sfp_set_transmitter(bp, params->port,
3231 ext_phy_type, ext_phy_addr, 0);
3232
3233 return rc;
3234}
3235
3236void bnx2x_handle_module_detect_int(struct link_params *params)
3237{
3238 struct bnx2x *bp = params->bp;
3239 u32 gpio_val;
3240 u8 port = params->port;
3241
3242 /* Set valid module led off */
3243 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
3244 MISC_REGISTERS_GPIO_HIGH,
3245 params->port);
3246
3247 /* Get current gpio val reflecting module plugged in/out */
3248 gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
3249
3250 /* Call the handling function in case module is detected */
3251 if (gpio_val == 0) {
3252
3253 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
3254 MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
3255 port);
3256
3257 if (bnx2x_wait_for_sfp_module_initialized(params) ==
3258 0)
3259 bnx2x_sfp_module_detection(params);
3260 else
3261 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
3262 } else {
3263 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3264
3265 u32 ext_phy_type =
3266 XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3267 u32 val = REG_RD(bp, params->shmem_base +
3268 offsetof(struct shmem_region, dev_info.
3269 port_feature_config[params->port].
3270 config));
3271
3272 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
3273 MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
3274 port);
3275 /* Module was plugged out. */
3276 /* Disable transmit for this module */
3277 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
3278 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
3279 bnx2x_sfp_set_transmitter(bp, params->port,
3280 ext_phy_type, ext_phy_addr, 0);
3281 }
3282}
3283
3284static void bnx2x_bcm807x_force_10G(struct link_params *params)
3285{
3286 struct bnx2x *bp = params->bp;
3287 u8 port = params->port;
3288 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3289 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3290
3291 /* Force KR or KX */
3292 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3293 MDIO_PMA_DEVAD,
3294 MDIO_PMA_REG_CTRL,
3295 0x2040);
3296 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3297 MDIO_PMA_DEVAD,
3298 MDIO_PMA_REG_10G_CTRL2,
3299 0x000b);
3300 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3301 MDIO_PMA_DEVAD,
3302 MDIO_PMA_REG_BCM_CTRL,
3303 0x0000);
3304 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3305 MDIO_AN_DEVAD,
3306 MDIO_AN_REG_CTRL,
3307 0x0000);
3308}
3309
3310static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
3311{
3312 struct bnx2x *bp = params->bp;
3313 u8 port = params->port;
3314 u16 val;
3315 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3316 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3317
3318 bnx2x_cl45_read(bp, params->port,
3319 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
3320 ext_phy_addr,
3321 MDIO_PMA_DEVAD,
3322 MDIO_PMA_REG_8073_CHIP_REV, &val);
3323
3324 if (val == 0) {
3325 /* Mustn't set low power mode in 8073 A0 */
3326 return;
3327 }
3328
3329 /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
3330 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
3331 MDIO_XS_DEVAD,
3332 MDIO_XS_PLL_SEQUENCER, &val);
3333 val &= ~(1<<13);
3334 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3335 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3336
3337 /* PLL controls */
3338 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3339 MDIO_XS_DEVAD, 0x805E, 0x1077);
3340 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3341 MDIO_XS_DEVAD, 0x805D, 0x0000);
3342 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3343 MDIO_XS_DEVAD, 0x805C, 0x030B);
3344 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3345 MDIO_XS_DEVAD, 0x805B, 0x1240);
3346 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3347 MDIO_XS_DEVAD, 0x805A, 0x2490);
3348
3349 /* Tx Controls */
3350 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3351 MDIO_XS_DEVAD, 0x80A7, 0x0C74);
3352 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3353 MDIO_XS_DEVAD, 0x80A6, 0x9041);
3354 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3355 MDIO_XS_DEVAD, 0x80A5, 0x4640);
3356
3357 /* Rx Controls */
3358 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3359 MDIO_XS_DEVAD, 0x80FE, 0x01C4);
3360 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3361 MDIO_XS_DEVAD, 0x80FD, 0x9249);
3362 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3363 MDIO_XS_DEVAD, 0x80FC, 0x2015);
3364
3365 /* Enable PLL sequencer (use read-modify-write to set bit 13) */
3366 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
3367 MDIO_XS_DEVAD,
3368 MDIO_XS_PLL_SEQUENCER, &val);
3369 val |= (1<<13);
3370 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3371 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3372}
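/*
 * Editorial note (illustrative, not part of the original driver): the
 * function above brackets the PLL/Tx/Rx tuning writes by clearing bit 13
 * of the PLL sequencer register before the updates and setting it again
 * afterwards, each time via a read-modify-write. A minimal model of that
 * bit update; the helper name is illustrative.
 */
#if 0	/* illustration only */
static u16 bnx2x_pll_sequencer_val(u16 val, u8 enable)
{
	return enable ? (u16)(val | (1 << 13)) : (u16)(val & ~(1 << 13));
}
#endif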
3373
3374static void bnx2x_8073_set_pause_cl37(struct link_params *params,
3375 struct link_vars *vars)
3376{
3377 struct bnx2x *bp = params->bp;
3378 u16 cl37_val;
3379 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3380 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3381
3382 bnx2x_cl45_read(bp, params->port,
3383 ext_phy_type,
3384 ext_phy_addr,
3385 MDIO_AN_DEVAD,
3386 MDIO_AN_REG_CL37_FC_LD, &cl37_val);
3387
3388 cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3389 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3390
3391 if ((vars->ieee_fc &
3392 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
3393 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
3394 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
3395 }
3396 if ((vars->ieee_fc &
3397 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
3398 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
3399 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3400 }
3401 if ((vars->ieee_fc &
3402 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
3403 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
3404 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3405 }
3406 DP(NETIF_MSG_LINK,
3407 "Ext phy AN advertize cl37 0x%x\n", cl37_val);
3408
3409 bnx2x_cl45_write(bp, params->port,
3410 ext_phy_type,
3411 ext_phy_addr,
3412 MDIO_AN_DEVAD,
3413 MDIO_AN_REG_CL37_FC_LD, cl37_val);
3414 msleep(500);
3415}
3416
3417static void bnx2x_ext_phy_set_pause(struct link_params *params,
3418 struct link_vars *vars)
3419{
3420 struct bnx2x *bp = params->bp;
3421 u16 val;
3422 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3423 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3424
3425 /* Read-modify-write the pause advertisement */
3426 bnx2x_cl45_read(bp, params->port,
3427 ext_phy_type,
3428 ext_phy_addr,
3429 MDIO_AN_DEVAD,
3430 MDIO_AN_REG_ADV_PAUSE, &val);
3431
3432 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
3433
3434 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3435
3436 if ((vars->ieee_fc &
3437 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
3438 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
3439 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
3440 }
3441 if ((vars->ieee_fc &
3442 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
3443 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
3444 val |=
3445 MDIO_AN_REG_ADV_PAUSE_PAUSE;
3446 }
3447 DP(NETIF_MSG_LINK,
3448 "Ext phy AN advertize 0x%x\n", val);
3449 bnx2x_cl45_write(bp, params->port,
3450 ext_phy_type,
3451 ext_phy_addr,
3452 MDIO_AN_DEVAD,
3453 MDIO_AN_REG_ADV_PAUSE, val);
3454}
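/*
 * Editorial sketch (not part of the original driver): the read-modify-write
 * above maps the CL37-style ieee_fc advertisement bits onto the CL73
 * ADV_PAUSE register bits per Table 28B-3: an asymmetric-pause request
 * sets the ASYMMETRIC bit, and a symmetric+asymmetric request additionally
 * sets the PAUSE bit. A minimal model of that mapping, reusing the driver's
 * own MDIO_* masks; the helper name and parameter types are illustrative.
 */
#if 0	/* illustration only */
static u16 bnx2x_ieee_fc_to_cl73_pause_adv(u16 ieee_fc, u16 adv)
{
	adv &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
	if ((ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC)
		adv |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
	if ((ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
		adv |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
	return adv;
}
#endif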
3455static void bnx2x_set_preemphasis(struct link_params *params)
3456{
3457 u16 bank, i = 0;
3458 struct bnx2x *bp = params->bp;
3459
3460 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
3461 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
3462 CL45_WR_OVER_CL22(bp, params->port,
3463 params->phy_addr,
3464 bank,
3465 MDIO_RX0_RX_EQ_BOOST,
3466 params->xgxs_config_rx[i]);
3467 }
3468
3469 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
3470 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
3471 CL45_WR_OVER_CL22(bp, params->port,
3472 params->phy_addr,
3473 bank,
3474 MDIO_TX0_TX_DRIVER,
3475 params->xgxs_config_tx[i]);
3476 }
3477}
3478
3479
3480static void bnx2x_8481_set_led4(struct link_params *params,
3481 u32 ext_phy_type, u8 ext_phy_addr)
3482{
3483 struct bnx2x *bp = params->bp;
3484
3485 /* PHYC_CTL_LED_CTL */
3486 bnx2x_cl45_write(bp, params->port,
3487 ext_phy_type,
3488 ext_phy_addr,
3489 MDIO_PMA_DEVAD,
3490 MDIO_PMA_REG_8481_LINK_SIGNAL, 0xa482);
3491
3492 /* Unmask LED4 for 10G link */
3493 bnx2x_cl45_write(bp, params->port,
3494 ext_phy_type,
3495 ext_phy_addr,
3496 MDIO_PMA_DEVAD,
3497 MDIO_PMA_REG_8481_SIGNAL_MASK, (1<<6));
3498 /* 'Interrupt Mask' */
3499 bnx2x_cl45_write(bp, params->port,
3500 ext_phy_type,
3501 ext_phy_addr,
3502 MDIO_AN_DEVAD,
3503 0xFFFB, 0xFFFD);
3504}
3505static void bnx2x_8481_set_legacy_led_mode(struct link_params *params,
3506 u32 ext_phy_type, u8 ext_phy_addr)
3507{
3508 struct bnx2x *bp = params->bp;
3509
3510 /* LED1 (10G Link): Disable LED1 when 10/100/1000 link */
3511 /* LED2 (1G/100/10 Link): Enable LED2 when 10/100/1000 link */
3512 bnx2x_cl45_write(bp, params->port,
3513 ext_phy_type,
3514 ext_phy_addr,
3515 MDIO_AN_DEVAD,
3516 MDIO_AN_REG_8481_LEGACY_SHADOW,
3517 (1<<15) | (0xd << 10) | (0xc<<4) | 0xe);
3518}
3519
3520static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
3521 u32 ext_phy_type, u8 ext_phy_addr)
3522{
3523 struct bnx2x *bp = params->bp;
3524 u16 val1;
3525
3526 /* LED1 (10G Link) */
3527 /* Enable continuous mode based on source 7 (10G-link) */
3528 bnx2x_cl45_read(bp, params->port,
3529 ext_phy_type,
3530 ext_phy_addr,
3531 MDIO_PMA_DEVAD,
3532 MDIO_PMA_REG_8481_LINK_SIGNAL,
3533 &val1);
3534 /* Set bit 2 to 0, and bits [1:0] to 10 */
3535 val1 &= ~((1<<0) | (1<<2) | (1<<7)); /* Clear bits 0, 2, 7 */
3536 val1 |= ((1<<1) | (1<<6)); /* Set bits 1, 6 */
3537
3538 bnx2x_cl45_write(bp, params->port,
3539 ext_phy_type,
3540 ext_phy_addr,
3541 MDIO_PMA_DEVAD,
3542 MDIO_PMA_REG_8481_LINK_SIGNAL,
3543 val1);
3544
3545 /* Unmask LED1 for 10G link */
3546 bnx2x_cl45_read(bp, params->port,
3547 ext_phy_type,
3548 ext_phy_addr,
3549 MDIO_PMA_DEVAD,
3550 MDIO_PMA_REG_8481_LED1_MASK,
3551 &val1);
3552 /* Set bit 7 to unmask LED1 for 10G link */
3553 val1 |= (1<<7);
3554 bnx2x_cl45_write(bp, params->port,
3555 ext_phy_type,
3556 ext_phy_addr,
3557 MDIO_PMA_DEVAD,
3558 MDIO_PMA_REG_8481_LED1_MASK,
3559 val1);
3560
3561 /* LED2 (1G/100/10G Link) */
3562 /* Mask LED2 for 10G link */
3563 bnx2x_cl45_write(bp, params->port,
3564 ext_phy_type,
3565 ext_phy_addr,
3566 MDIO_PMA_DEVAD,
3567 MDIO_PMA_REG_8481_LED2_MASK,
3568 0);
3569
3570 /* Unmask LED3 for 10G link */
3571 bnx2x_cl45_write(bp, params->port,
3572 ext_phy_type,
3573 ext_phy_addr,
3574 MDIO_PMA_DEVAD,
3575 MDIO_PMA_REG_8481_LED3_MASK,
3576 0x6);
3577 bnx2x_cl45_write(bp, params->port,
3578 ext_phy_type,
3579 ext_phy_addr,
3580 MDIO_PMA_DEVAD,
3581 MDIO_PMA_REG_8481_LED3_BLINK,
3582 0);
3583}
3584
3585
3586static void bnx2x_init_internal_phy(struct link_params *params,
3587 struct link_vars *vars,
3588 u8 enable_cl73)
3589{
3590 struct bnx2x *bp = params->bp;
3591
3592 if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
3593 if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
3594 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3595 (params->feature_config_flags &
3596 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
3597 bnx2x_set_preemphasis(params);
3598
3599 /* forced speed requested? */
3600 if (vars->line_speed != SPEED_AUTO_NEG ||
3601 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
3602 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3603 params->loopback_mode == LOOPBACK_EXT)) {
3604 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3605
3606 /* disable autoneg */
3607 bnx2x_set_autoneg(params, vars, 0);
3608
3609 /* program speed and duplex */
3610 bnx2x_program_serdes(params, vars);
3611
3612 } else { /* AN_mode */
3613 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3614
3615 /* AN enabled */
3616 bnx2x_set_brcm_cl37_advertisment(params);
3617
3618 /* program duplex & pause advertisement (for aneg) */
3619 bnx2x_set_ieee_aneg_advertisment(params,
3620 vars->ieee_fc);
3621
3622 /* enable autoneg */
3623 bnx2x_set_autoneg(params, vars, enable_cl73);
3624
3625 /* enable and restart AN */
3626 bnx2x_restart_autoneg(params, enable_cl73);
3627 }
3628
3629 } else { /* SGMII mode */
3630 DP(NETIF_MSG_LINK, "SGMII\n");
3631
3632 bnx2x_initialize_sgmii_process(params, vars);
3633 }
3634}
3635
3636static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3637{
3638 struct bnx2x *bp = params->bp;
3639 u32 ext_phy_type;
3640 u8 ext_phy_addr;
3641 u16 cnt;
3642 u16 ctrl = 0;
3643 u16 val = 0;
3644 u8 rc = 0;
3645
3646 if (vars->phy_flags & PHY_XGXS_FLAG) {
3647 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3648
3649 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3650 /* Make sure that the soft reset is off (except for the 8072:
3651 * due to the lock, it will be done inside the specific
3652 * handling)
3653 */
3654 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3655 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3656 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3657 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) &&
3658 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) {
3659 /* Wait for the soft reset to clear, up to 1 sec */
3660 for (cnt = 0; cnt < 1000; cnt++) {
3661 bnx2x_cl45_read(bp, params->port,
3662 ext_phy_type,
3663 ext_phy_addr,
3664 MDIO_PMA_DEVAD,
3665 MDIO_PMA_REG_CTRL, &ctrl);
3666 if (!(ctrl & (1<<15)))
3667 break;
3668 msleep(1);
3669 }
3670 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n",
3671 ctrl, cnt);
3672 }
3673
3674 switch (ext_phy_type) {
3675 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3676 break;
3677
3678 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3679 DP(NETIF_MSG_LINK, "XGXS 8705\n");
3680
3681 bnx2x_cl45_write(bp, params->port,
3682 ext_phy_type,
3683 ext_phy_addr,
3684 MDIO_PMA_DEVAD,
3685 MDIO_PMA_REG_MISC_CTRL,
3686 0x8288);
3687 bnx2x_cl45_write(bp, params->port,
3688 ext_phy_type,
3689 ext_phy_addr,
3690 MDIO_PMA_DEVAD,
3691 MDIO_PMA_REG_PHY_IDENTIFIER,
3692 0x7fbf);
3693 bnx2x_cl45_write(bp, params->port,
3694 ext_phy_type,
3695 ext_phy_addr,
3696 MDIO_PMA_DEVAD,
3697 MDIO_PMA_REG_CMU_PLL_BYPASS,
3698 0x0100);
3699 bnx2x_cl45_write(bp, params->port,
3700 ext_phy_type,
3701 ext_phy_addr,
3702 MDIO_WIS_DEVAD,
3703 MDIO_WIS_REG_LASI_CNTL, 0x1);
3704
3705 /* BCM8705 doesn't have microcode, hence the 0 */
3706 bnx2x_save_spirom_version(bp, params->port,
3707 params->shmem_base, 0);
3708 break;
3709
3710 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3711 /* Wait until fw is loaded */
3712 for (cnt = 0; cnt < 100; cnt++) {
3713 bnx2x_cl45_read(bp, params->port, ext_phy_type,
3714 ext_phy_addr, MDIO_PMA_DEVAD,
3715 MDIO_PMA_REG_ROM_VER1, &val);
3716 if (val)
3717 break;
3718 msleep(10);
3719 }
3720 DP(NETIF_MSG_LINK, "XGXS 8706 is initialized "
3721 "after %d ms\n", cnt);
3722 if ((params->feature_config_flags &
3723 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
3724 u8 i;
3725 u16 reg;
3726 for (i = 0; i < 4; i++) {
3727 reg = MDIO_XS_8706_REG_BANK_RX0 +
3728 i*(MDIO_XS_8706_REG_BANK_RX1 -
3729 MDIO_XS_8706_REG_BANK_RX0);
3730 bnx2x_cl45_read(bp, params->port,
3731 ext_phy_type,
3732 ext_phy_addr,
3733 MDIO_XS_DEVAD,
3734 reg, &val);
3735 /* Clear first 3 bits of the control */
3736 val &= ~0x7;
3737 /* Set control bits according to
3738 configuration */
3739 val |= (params->xgxs_config_rx[i] &
3740 0x7);
3741 DP(NETIF_MSG_LINK, "Setting RX"
3742 "Equalizer to BCM8706 reg 0x%x"
3743 " <-- val 0x%x\n", reg, val);
3744 bnx2x_cl45_write(bp, params->port,
3745 ext_phy_type,
3746 ext_phy_addr,
3747 MDIO_XS_DEVAD,
3748 reg, val);
3749 }
3750 }
3751 /* Force speed */
3752 if (params->req_line_speed == SPEED_10000) {
3753 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
3754
3755 bnx2x_cl45_write(bp, params->port,
3756 ext_phy_type,
3757 ext_phy_addr,
3758 MDIO_PMA_DEVAD,
3759 MDIO_PMA_REG_DIGITAL_CTRL,
3760 0x400);
3761 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3762 ext_phy_addr, MDIO_PMA_DEVAD,
3763 MDIO_PMA_REG_LASI_CTRL, 1);
3764 } else {
3765 /* Force 1Gbps using autoneg with 1G
3766 advertisement */
3767
3768 /* Allow CL37 through CL73 */
3769 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3770 bnx2x_cl45_write(bp, params->port,
3771 ext_phy_type,
3772 ext_phy_addr,
3773 MDIO_AN_DEVAD,
3774 MDIO_AN_REG_CL37_CL73,
3775 0x040c);
3776
3777 /* Enable Full-Duplex advertisement on CL37 */
3778 bnx2x_cl45_write(bp, params->port,
3779 ext_phy_type,
3780 ext_phy_addr,
3781 MDIO_AN_DEVAD,
3782 MDIO_AN_REG_CL37_FC_LP,
3783 0x0020);
3784 /* Enable CL37 AN */
3785 bnx2x_cl45_write(bp, params->port,
3786 ext_phy_type,
3787 ext_phy_addr,
3788 MDIO_AN_DEVAD,
3789 MDIO_AN_REG_CL37_AN,
3790 0x1000);
3791 /* 1G support */
3792 bnx2x_cl45_write(bp, params->port,
3793 ext_phy_type,
3794 ext_phy_addr,
3795 MDIO_AN_DEVAD,
3796 MDIO_AN_REG_ADV, (1<<5));
3797
3798 /* Enable clause 73 AN */
3799 bnx2x_cl45_write(bp, params->port,
3800 ext_phy_type,
3801 ext_phy_addr,
3802 MDIO_AN_DEVAD,
3803 MDIO_AN_REG_CTRL,
3804 0x1200);
3805 bnx2x_cl45_write(bp, params->port,
3806 ext_phy_type,
3807 ext_phy_addr,
3808 MDIO_PMA_DEVAD,
3809 MDIO_PMA_REG_RX_ALARM_CTRL,
3810 0x0400);
3811 bnx2x_cl45_write(bp, params->port,
3812 ext_phy_type,
3813 ext_phy_addr,
3814 MDIO_PMA_DEVAD,
3815 MDIO_PMA_REG_LASI_CTRL, 0x0004);
3816
3817 }
3818 bnx2x_save_bcm_spirom_ver(bp, params->port,
3819 ext_phy_type,
3820 ext_phy_addr,
3821 params->shmem_base);
3822 break;
3823 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
3824 DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
3825 bnx2x_bcm8726_external_rom_boot(params);
3826
3827 /* Need to call module detection on initialization, since
3828 the module detection triggered by an actual module
3829 insertion might occur before the driver is loaded; when the
3830 driver is loaded, it resets all registers, including the
3831 transmitter */
3832 bnx2x_sfp_module_detection(params);
3833
3834 /* Set Flow control */
3835 bnx2x_ext_phy_set_pause(params, vars);
3836 if (params->req_line_speed == SPEED_1000) {
3837 DP(NETIF_MSG_LINK, "Setting 1G force\n");
3838 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3839 ext_phy_addr, MDIO_PMA_DEVAD,
3840 MDIO_PMA_REG_CTRL, 0x40);
3841 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3842 ext_phy_addr, MDIO_PMA_DEVAD,
3843 MDIO_PMA_REG_10G_CTRL2, 0xD);
3844 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3845 ext_phy_addr, MDIO_PMA_DEVAD,
3846 MDIO_PMA_REG_LASI_CTRL, 0x5);
3847 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3848 ext_phy_addr, MDIO_PMA_DEVAD,
3849 MDIO_PMA_REG_RX_ALARM_CTRL,
3850 0x400);
3851 } else if ((params->req_line_speed ==
3852 SPEED_AUTO_NEG) &&
3853 ((params->speed_cap_mask &
3854 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
3855 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
3856 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3857 ext_phy_addr, MDIO_AN_DEVAD,
3858 MDIO_AN_REG_ADV, 0x20);
3859 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3860 ext_phy_addr, MDIO_AN_DEVAD,
3861 MDIO_AN_REG_CL37_CL73, 0x040c);
3862 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3863 ext_phy_addr, MDIO_AN_DEVAD,
3864 MDIO_AN_REG_CL37_FC_LD, 0x0020);
3865 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3866 ext_phy_addr, MDIO_AN_DEVAD,
3867 MDIO_AN_REG_CL37_AN, 0x1000);
3868 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3869 ext_phy_addr, MDIO_AN_DEVAD,
3870 MDIO_AN_REG_CTRL, 0x1200);
3871
3872 /* Enable RX-ALARM control to receive
3873 interrupt for 1G speed change */
3874 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3875 ext_phy_addr, MDIO_PMA_DEVAD,
3876 MDIO_PMA_REG_LASI_CTRL, 0x4);
3877 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3878 ext_phy_addr, MDIO_PMA_DEVAD,
3879 MDIO_PMA_REG_RX_ALARM_CTRL,
3880 0x400);
3881
3882 } else { /* Default 10G. Set only LASI control */
3883 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3884 ext_phy_addr, MDIO_PMA_DEVAD,
3885 MDIO_PMA_REG_LASI_CTRL, 1);
3886 }
3887
3888 /* Set TX PreEmphasis if needed */
3889 if ((params->feature_config_flags &
3890 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
3891 DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
3892 "TX_CTRL2 0x%x\n",
3893 params->xgxs_config_tx[0],
3894 params->xgxs_config_tx[1]);
3895 bnx2x_cl45_write(bp, params->port,
3896 ext_phy_type,
3897 ext_phy_addr,
3898 MDIO_PMA_DEVAD,
3899 MDIO_PMA_REG_8726_TX_CTRL1,
3900 params->xgxs_config_tx[0]);
3901
3902 bnx2x_cl45_write(bp, params->port,
3903 ext_phy_type,
3904 ext_phy_addr,
3905 MDIO_PMA_DEVAD,
3906 MDIO_PMA_REG_8726_TX_CTRL2,
3907 params->xgxs_config_tx[1]);
3908 }
3909 break;
3910 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3911 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
3912 {
3913 u16 tmp1;
3914 u16 rx_alarm_ctrl_val;
3915 u16 lasi_ctrl_val;
3916 if (ext_phy_type ==
3917 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
3918 rx_alarm_ctrl_val = 0x400;
3919 lasi_ctrl_val = 0x0004;
3920 } else {
3921 rx_alarm_ctrl_val = (1<<2);
3922 lasi_ctrl_val = 0x0004;
3923 }
3924
3925 /* enable LASI */
3926 bnx2x_cl45_write(bp, params->port,
3927 ext_phy_type,
3928 ext_phy_addr,
3929 MDIO_PMA_DEVAD,
3930 MDIO_PMA_REG_RX_ALARM_CTRL,
3931 rx_alarm_ctrl_val);
3932
3933 bnx2x_cl45_write(bp, params->port,
3934 ext_phy_type,
3935 ext_phy_addr,
3936 MDIO_PMA_DEVAD,
3937 MDIO_PMA_REG_LASI_CTRL,
3938 lasi_ctrl_val);
3939
3940 bnx2x_8073_set_pause_cl37(params, vars);
3941
3942 if (ext_phy_type ==
3943 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)
3944 bnx2x_bcm8072_external_rom_boot(params);
3945 else
3946 /* In case of 8073 with long xaui lines,
3947 don't set the 8073 xaui low power*/
3948 bnx2x_bcm8073_set_xaui_low_power_mode(params);
3949
3950 bnx2x_cl45_read(bp, params->port,
3951 ext_phy_type,
3952 ext_phy_addr,
3953 MDIO_PMA_DEVAD,
3954 MDIO_PMA_REG_M8051_MSGOUT_REG,
3955 &tmp1);
3956
3957 bnx2x_cl45_read(bp, params->port,
3958 ext_phy_type,
3959 ext_phy_addr,
3960 MDIO_PMA_DEVAD,
3961 MDIO_PMA_REG_RX_ALARM, &tmp1);
3962
3963 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1):"
3964 "0x%x\n", tmp1);
3965
3966 /* If this is forced speed, set to KR or KX
3967 * (all others are not supported)
3968 */
3969 if (params->loopback_mode == LOOPBACK_EXT) {
3970 bnx2x_bcm807x_force_10G(params);
3971 DP(NETIF_MSG_LINK,
3972 "Forced speed 10G on 807X\n");
3973 break;
3974 } else {
3975 bnx2x_cl45_write(bp, params->port,
3976 ext_phy_type, ext_phy_addr,
3977 MDIO_PMA_DEVAD,
3978 MDIO_PMA_REG_BCM_CTRL,
3979 0x0002);
3980 }
3981 if (params->req_line_speed != SPEED_AUTO_NEG) {
3982 if (params->req_line_speed == SPEED_10000) {
3983 val = (1<<7);
3984 } else if (params->req_line_speed ==
3985 SPEED_2500) {
3986 val = (1<<5);
3987 /* Note that 2.5G works only
3988 when used with 1G advertisement */
3989 } else
3990 val = (1<<5);
3991 } else {
3992
3993 val = 0;
3994 if (params->speed_cap_mask &
3995 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
3996 val |= (1<<7);
3997
3998 /* Note that 2.5G works only when
3999 used with 1G advertisement */
4000 if (params->speed_cap_mask &
4001 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
4002 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
4003 val |= (1<<5);
4004 DP(NETIF_MSG_LINK,
4005 "807x autoneg val = 0x%x\n", val);
4006 }
4007
4008 bnx2x_cl45_write(bp, params->port,
4009 ext_phy_type,
4010 ext_phy_addr,
4011 MDIO_AN_DEVAD,
4012 MDIO_AN_REG_ADV, val);
4013 if (ext_phy_type ==
4014 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
4015 bnx2x_cl45_read(bp, params->port,
4016 ext_phy_type,
4017 ext_phy_addr,
4018 MDIO_AN_DEVAD,
4019 MDIO_AN_REG_8073_2_5G, &tmp1);
4020
4021 if (((params->speed_cap_mask &
4022 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
4023 (params->req_line_speed ==
4024 SPEED_AUTO_NEG)) ||
4025 (params->req_line_speed ==
4026 SPEED_2500)) {
4027 u16 phy_ver;
4028 /* Allow 2.5G for A1 and above */
4029 bnx2x_cl45_read(bp, params->port,
4030 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4031 ext_phy_addr,
4032 MDIO_PMA_DEVAD,
4033 MDIO_PMA_REG_8073_CHIP_REV, &phy_ver);
4034 DP(NETIF_MSG_LINK, "Add 2.5G\n");
4035 if (phy_ver > 0)
4036 tmp1 |= 1;
4037 else
4038 tmp1 &= 0xfffe;
4039 } else {
4040 DP(NETIF_MSG_LINK, "Disable 2.5G\n");
4041 tmp1 &= 0xfffe;
4042 }
4043
4044 bnx2x_cl45_write(bp, params->port,
4045 ext_phy_type,
4046 ext_phy_addr,
4047 MDIO_AN_DEVAD,
4048 MDIO_AN_REG_8073_2_5G, tmp1);
4049 }
4050
4051 /* Add support for CL37 (passive mode) II */
4052
4053 bnx2x_cl45_read(bp, params->port,
4054 ext_phy_type,
4055 ext_phy_addr,
4056 MDIO_AN_DEVAD,
4057 MDIO_AN_REG_CL37_FC_LD,
4058 &tmp1);
4059
4060 bnx2x_cl45_write(bp, params->port,
4061 ext_phy_type,
4062 ext_phy_addr,
4063 MDIO_AN_DEVAD,
4064 MDIO_AN_REG_CL37_FC_LD, (tmp1 |
4065 ((params->req_duplex == DUPLEX_FULL) ?
4066 0x20 : 0x40)));
4067
4068 /* Add support for CL37 (passive mode) III */
4069 bnx2x_cl45_write(bp, params->port,
4070 ext_phy_type,
4071 ext_phy_addr,
4072 MDIO_AN_DEVAD,
4073 MDIO_AN_REG_CL37_AN, 0x1000);
4074
4075 if (ext_phy_type ==
4076 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
4077 /* The SNR will improve by about 2 dB by changing the
4078 BW and FFE main tap. The rest of the commands are executed
4079 after link is up */
4080 /* Change FFE main cursor to 5 in EDC register */
4081 if (bnx2x_8073_is_snr_needed(params))
4082 bnx2x_cl45_write(bp, params->port,
4083 ext_phy_type,
4084 ext_phy_addr,
4085 MDIO_PMA_DEVAD,
4086 MDIO_PMA_REG_EDC_FFE_MAIN,
4087 0xFB0C);
4088
4089 /* Enable FEC (Forward Error Correction)
4090 Request in the AN */
4091 bnx2x_cl45_read(bp, params->port,
4092 ext_phy_type,
4093 ext_phy_addr,
4094 MDIO_AN_DEVAD,
4095 MDIO_AN_REG_ADV2, &tmp1);
4096
4097 tmp1 |= (1<<15);
4098
4099 bnx2x_cl45_write(bp, params->port,
4100 ext_phy_type,
4101 ext_phy_addr,
4102 MDIO_AN_DEVAD,
4103 MDIO_AN_REG_ADV2, tmp1);
4104
4105 }
4106
4107 bnx2x_ext_phy_set_pause(params, vars);
4108
4109 /* Restart autoneg */
4110 msleep(500);
4111 bnx2x_cl45_write(bp, params->port,
4112 ext_phy_type,
4113 ext_phy_addr,
4114 MDIO_AN_DEVAD,
4115 MDIO_AN_REG_CTRL, 0x1200);
4116 DP(NETIF_MSG_LINK, "807x Autoneg Restart: "
4117 "Advertise 1G=%x, 10G=%x\n",
4118 ((val & (1<<5)) > 0),
4119 ((val & (1<<7)) > 0));
4120 break;
4121 }
4122
4123 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4124 {
4125 u16 tmp1;
4126 u16 rx_alarm_ctrl_val;
4127 u16 lasi_ctrl_val;
4128
4129 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
4130
4131 u16 mod_abs;
4132 rx_alarm_ctrl_val = (1<<2) | (1<<5);
4133 lasi_ctrl_val = 0x0004;
4134
4135 DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
4136 /* enable LASI */
4137 bnx2x_cl45_write(bp, params->port,
4138 ext_phy_type,
4139 ext_phy_addr,
4140 MDIO_PMA_DEVAD,
4141 MDIO_PMA_REG_RX_ALARM_CTRL,
4142 rx_alarm_ctrl_val);
4143
4144 bnx2x_cl45_write(bp, params->port,
4145 ext_phy_type,
4146 ext_phy_addr,
4147 MDIO_PMA_DEVAD,
4148 MDIO_PMA_REG_LASI_CTRL,
4149 lasi_ctrl_val);
4150
4151 /* Initially configure MOD_ABS to interrupt when a
4152 module is present (bit 8) */
4153 bnx2x_cl45_read(bp, params->port,
4154 ext_phy_type,
4155 ext_phy_addr,
4156 MDIO_PMA_DEVAD,
4157 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
4158 /* Set EDC off by setting OPTXLOS signal input to low
4159 (bit 9).
4160 When the EDC is off it locks onto a reference clock and
4161 avoids becoming 'lost'.*/
4162 mod_abs &= ~((1<<8) | (1<<9));
4163 bnx2x_cl45_write(bp, params->port,
4164 ext_phy_type,
4165 ext_phy_addr,
4166 MDIO_PMA_DEVAD,
4167 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4168
4169 /* Make MOD_ABS give interrupt on change */
4170 bnx2x_cl45_read(bp, params->port,
4171 ext_phy_type,
4172 ext_phy_addr,
4173 MDIO_PMA_DEVAD,
4174 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4175 &val);
4176 val |= (1<<12);
4177 bnx2x_cl45_write(bp, params->port,
4178 ext_phy_type,
4179 ext_phy_addr,
4180 MDIO_PMA_DEVAD,
4181 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4182 val);
4183
4184 /* Set 8727 GPIOs to input to allow reading from the
4185 8727 GPIO0 status, which reflects the SFP+ module
4186 over-current */
4187
4188 bnx2x_cl45_read(bp, params->port,
4189 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4190 ext_phy_addr,
4191 MDIO_PMA_DEVAD,
4192 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4193 &val);
4194 val &= 0xff8f; /* Reset bits 4-6 */
4195 bnx2x_cl45_write(bp, params->port,
4196 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4197 ext_phy_addr,
4198 MDIO_PMA_DEVAD,
4199 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4200 val);
4201
4202 bnx2x_8727_power_module(bp, params, ext_phy_addr, 1);
4203 bnx2x_bcm8073_set_xaui_low_power_mode(params);
4204
4205 bnx2x_cl45_read(bp, params->port,
4206 ext_phy_type,
4207 ext_phy_addr,
4208 MDIO_PMA_DEVAD,
4209 MDIO_PMA_REG_M8051_MSGOUT_REG,
4210 &tmp1);
4211
4212 bnx2x_cl45_read(bp, params->port,
4213 ext_phy_type,
4214 ext_phy_addr,
4215 MDIO_PMA_DEVAD,
4216 MDIO_PMA_REG_RX_ALARM, &tmp1);
4217
4218 /* Set option 1G speed */
4219 if (params->req_line_speed == SPEED_1000) {
4220
4221 DP(NETIF_MSG_LINK, "Setting 1G force\n");
4222 bnx2x_cl45_write(bp, params->port,
4223 ext_phy_type,
4224 ext_phy_addr,
4225 MDIO_PMA_DEVAD,
4226 MDIO_PMA_REG_CTRL, 0x40);
4227 bnx2x_cl45_write(bp, params->port,
4228 ext_phy_type,
4229 ext_phy_addr,
4230 MDIO_PMA_DEVAD,
4231 MDIO_PMA_REG_10G_CTRL2, 0xD);
4232 bnx2x_cl45_read(bp, params->port,
4233 ext_phy_type,
4234 ext_phy_addr,
4235 MDIO_PMA_DEVAD,
4236 MDIO_PMA_REG_10G_CTRL2, &tmp1);
4237 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
4238
4239 } else if ((params->req_line_speed ==
4240 SPEED_AUTO_NEG) &&
4241 ((params->speed_cap_mask &
4242 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
4243
4244 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
4245 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4246 ext_phy_addr, MDIO_AN_DEVAD,
4247 MDIO_PMA_REG_8727_MISC_CTRL, 0);
4248 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4249 ext_phy_addr, MDIO_AN_DEVAD,
4250 MDIO_AN_REG_CL37_AN, 0x1300);
4251 } else {
4252 /* Since the 8727 has only a single reset pin,
4253 the 10G registers need to be set although they are
4254 the default */
4255 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4256 ext_phy_addr, MDIO_AN_DEVAD,
4257 MDIO_AN_REG_CTRL, 0x0020);
4258 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4259 ext_phy_addr, MDIO_AN_DEVAD,
4260 0x7, 0x0100);
4261 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4262 ext_phy_addr, MDIO_PMA_DEVAD,
4263 MDIO_PMA_REG_CTRL, 0x2040);
4264 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4265 ext_phy_addr, MDIO_PMA_DEVAD,
4266 MDIO_PMA_REG_10G_CTRL2, 0x0008);
4267 }
4268
4269 /* Set the 2-wire transfer rate of the SFP+ module EEPROM
4270 * to 100 kHz, since some DACs (direct attached cables) do
4271 * not work at 400 kHz.
4272 */
4273 bnx2x_cl45_write(bp, params->port,
4274 ext_phy_type,
4275 ext_phy_addr,
4276 MDIO_PMA_DEVAD,
4277 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
4278 0xa001);
4279
4280 /* Set TX PreEmphasis if needed */
4281 if ((params->feature_config_flags &
4282 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
4283 DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
4284 "TX_CTRL2 0x%x\n",
4285 params->xgxs_config_tx[0],
4286 params->xgxs_config_tx[1]);
4287 bnx2x_cl45_write(bp, params->port,
4288 ext_phy_type,
4289 ext_phy_addr,
4290 MDIO_PMA_DEVAD,
4291 MDIO_PMA_REG_8727_TX_CTRL1,
4292 params->xgxs_config_tx[0]);
4293
4294 bnx2x_cl45_write(bp, params->port,
4295 ext_phy_type,
4296 ext_phy_addr,
4297 MDIO_PMA_DEVAD,
4298 MDIO_PMA_REG_8727_TX_CTRL2,
4299 params->xgxs_config_tx[1]);
4300 }
4301
4302 break;
4303 }
4304
4305 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4306 {
4307 u16 fw_ver1, fw_ver2;
4308 DP(NETIF_MSG_LINK,
4309 "Setting the SFX7101 LASI indication\n");
4310
4311 bnx2x_cl45_write(bp, params->port,
4312 ext_phy_type,
4313 ext_phy_addr,
4314 MDIO_PMA_DEVAD,
4315 MDIO_PMA_REG_LASI_CTRL, 0x1);
4316 DP(NETIF_MSG_LINK,
4317 "Setting the SFX7101 LED to blink on traffic\n");
4318 bnx2x_cl45_write(bp, params->port,
4319 ext_phy_type,
4320 ext_phy_addr,
4321 MDIO_PMA_DEVAD,
4322 MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
4323
4324 bnx2x_ext_phy_set_pause(params, vars);
4325 /* Restart autoneg */
4326 bnx2x_cl45_read(bp, params->port,
4327 ext_phy_type,
4328 ext_phy_addr,
4329 MDIO_AN_DEVAD,
4330 MDIO_AN_REG_CTRL, &val);
4331 val |= 0x200;
4332 bnx2x_cl45_write(bp, params->port,
4333 ext_phy_type,
4334 ext_phy_addr,
4335 MDIO_AN_DEVAD,
4336 MDIO_AN_REG_CTRL, val);
4337
4338 /* Save spirom version */
4339 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4340 ext_phy_addr, MDIO_PMA_DEVAD,
4341 MDIO_PMA_REG_7101_VER1, &fw_ver1);
4342
4343 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4344 ext_phy_addr, MDIO_PMA_DEVAD,
4345 MDIO_PMA_REG_7101_VER2, &fw_ver2);
4346
4347 bnx2x_save_spirom_version(params->bp, params->port,
4348 params->shmem_base,
4349 (u32)(fw_ver1<<16 | fw_ver2));
4350 break;
4351 }
4352 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
4353 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
4354 /* This phy uses the NIG latch mechanism since the link
4355 indication arrives through its LED4 and not via
4356 its LASI signal, so we get a steady signal
4357 instead of clear-on-read */
4358 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
4359 1 << NIG_LATCH_BC_ENABLE_MI_INT);
4360
4361 bnx2x_cl45_write(bp, params->port,
4362 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
4363 ext_phy_addr,
4364 MDIO_PMA_DEVAD,
4365 MDIO_PMA_REG_CTRL, 0x0000);
4366
4367 bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr);
4368 if (params->req_line_speed == SPEED_AUTO_NEG) {
4369
4370 u16 autoneg_val, an_1000_val, an_10_100_val;
4371 /* set 1000 speed advertisement */
4372 bnx2x_cl45_read(bp, params->port,
4373 ext_phy_type,
4374 ext_phy_addr,
4375 MDIO_AN_DEVAD,
4376 MDIO_AN_REG_8481_1000T_CTRL,
4377 &an_1000_val);
4378
4379 if (params->speed_cap_mask &
4380 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
4381 an_1000_val |= (1<<8);
4382 if (params->req_duplex == DUPLEX_FULL)
4383 an_1000_val |= (1<<9);
4384 DP(NETIF_MSG_LINK, "Advertising 1G\n");
4385 } else
4386 an_1000_val &= ~((1<<8) | (1<<9));
4387
4388 bnx2x_cl45_write(bp, params->port,
4389 ext_phy_type,
4390 ext_phy_addr,
4391 MDIO_AN_DEVAD,
4392 MDIO_AN_REG_8481_1000T_CTRL,
4393 an_1000_val);
4394
4395 /* set 100 speed advertisement */
4396 bnx2x_cl45_read(bp, params->port,
4397 ext_phy_type,
4398 ext_phy_addr,
4399 MDIO_AN_DEVAD,
4400 MDIO_AN_REG_8481_LEGACY_AN_ADV,
4401 &an_10_100_val);
4402
4403 if (params->speed_cap_mask &
4404 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
4405 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
4406 an_10_100_val |= (1<<7);
4407 if (params->req_duplex == DUPLEX_FULL)
4408 an_10_100_val |= (1<<8);
4409 DP(NETIF_MSG_LINK,
4410 "Advertising 100M\n");
4411 } else
4412 an_10_100_val &= ~((1<<7) | (1<<8));
4413
4414 /* set 10 speed advertisement */
4415 if (params->speed_cap_mask &
4416 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
4417 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
4418 an_10_100_val |= (1<<5);
4419 if (params->req_duplex == DUPLEX_FULL)
4420 an_10_100_val |= (1<<6);
4421 DP(NETIF_MSG_LINK, "Advertising 10M\n");
4422 }
4423 else
4424 an_10_100_val &= ~((1<<5) | (1<<6));
4425
4426 bnx2x_cl45_write(bp, params->port,
4427 ext_phy_type,
4428 ext_phy_addr,
4429 MDIO_AN_DEVAD,
4430 MDIO_AN_REG_8481_LEGACY_AN_ADV,
4431 an_10_100_val);
4432
4433 bnx2x_cl45_read(bp, params->port,
4434 ext_phy_type,
4435 ext_phy_addr,
4436 MDIO_AN_DEVAD,
4437 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4438 &autoneg_val);
4439
4440 /* Disable forced speed */
4441 autoneg_val &= ~(1<<6|1<<13);
4442
4443 /* Enable autoneg and restart autoneg
4444 for legacy speeds */
4445 autoneg_val |= (1<<9|1<<12);
4446
4447 if (params->req_duplex == DUPLEX_FULL)
4448 autoneg_val |= (1<<8);
4449 else
4450 autoneg_val &= ~(1<<8);
4451
4452 bnx2x_cl45_write(bp, params->port,
4453 ext_phy_type,
4454 ext_phy_addr,
4455 MDIO_AN_DEVAD,
4456 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4457 autoneg_val);
4458
4459 if (params->speed_cap_mask &
4460 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
4461 DP(NETIF_MSG_LINK, "Advertising 10G\n");
4462 /* Restart autoneg for 10G*/
4463
4464 bnx2x_cl45_write(bp, params->port,
4465 ext_phy_type,
4466 ext_phy_addr,
4467 MDIO_AN_DEVAD,
4468 MDIO_AN_REG_CTRL, 0x3200);
4469 }
4470 } else {
4471 /* Force speed */
4472 u16 autoneg_ctrl, pma_ctrl;
4473 bnx2x_cl45_read(bp, params->port,
4474 ext_phy_type,
4475 ext_phy_addr,
4476 MDIO_AN_DEVAD,
4477 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4478 &autoneg_ctrl);
4479
4480 /* Disable autoneg */
4481 autoneg_ctrl &= ~(1<<12);
4482
4483 /* Set 1000 force */
4484 switch (params->req_line_speed) {
4485 case SPEED_10000:
4486 DP(NETIF_MSG_LINK,
4487 "Unable to set 10G force !\n");
4488 break;
4489 case SPEED_1000:
4490 bnx2x_cl45_read(bp, params->port,
4491 ext_phy_type,
4492 ext_phy_addr,
4493 MDIO_PMA_DEVAD,
4494 MDIO_PMA_REG_CTRL,
4495 &pma_ctrl);
4496 autoneg_ctrl &= ~(1<<13);
4497 autoneg_ctrl |= (1<<6);
4498 pma_ctrl &= ~(1<<13);
4499 pma_ctrl |= (1<<6);
4500 DP(NETIF_MSG_LINK,
4501 "Setting 1000M force\n");
4502 bnx2x_cl45_write(bp, params->port,
4503 ext_phy_type,
4504 ext_phy_addr,
4505 MDIO_PMA_DEVAD,
4506 MDIO_PMA_REG_CTRL,
4507 pma_ctrl);
4508 break;
4509 case SPEED_100:
4510 autoneg_ctrl |= (1<<13);
4511 autoneg_ctrl &= ~(1<<6);
4512 DP(NETIF_MSG_LINK,
4513 "Setting 100M force\n");
4514 break;
4515 case SPEED_10:
4516 autoneg_ctrl &= ~(1<<13);
4517 autoneg_ctrl &= ~(1<<6);
4518 DP(NETIF_MSG_LINK,
4519 "Setting 10M force\n");
4520 break;
4521 }
4522
4523 /* Duplex mode */
4524 if (params->req_duplex == DUPLEX_FULL) {
4525 autoneg_ctrl |= (1<<8);
4526 DP(NETIF_MSG_LINK,
4527 "Setting full duplex\n");
4528 } else
4529 autoneg_ctrl &= ~(1<<8);
4530
4531 /* Update autoneg ctrl and pma ctrl */
4532 bnx2x_cl45_write(bp, params->port,
4533 ext_phy_type,
4534 ext_phy_addr,
4535 MDIO_AN_DEVAD,
4536 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4537 autoneg_ctrl);
4538 }
4539
4540 /* Save spirom version */
4541 bnx2x_save_8481_spirom_version(bp, params->port,
4542 ext_phy_addr,
4543 params->shmem_base);
4544 break;
4545 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
4546 DP(NETIF_MSG_LINK,
4547 "XGXS PHY Failure detected 0x%x\n",
4548 params->ext_phy_config);
4549 rc = -EINVAL;
4550 break;
4551 default:
4552 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
4553 params->ext_phy_config);
4554 rc = -EINVAL;
4555 break;
4556 }
4557
4558 } else { /* SerDes */
4559
4560 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
4561 switch (ext_phy_type) {
4562 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
4563 DP(NETIF_MSG_LINK, "SerDes Direct\n");
4564 break;
4565
4566 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
4567 DP(NETIF_MSG_LINK, "SerDes 5482\n");
4568 break;
4569
4570 default:
4571 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
4572 params->ext_phy_config);
4573 break;
4574 }
4575 }
4576 return rc;
4577}
4578
4579static void bnx2x_8727_handle_mod_abs(struct link_params *params)
4580{
4581 struct bnx2x *bp = params->bp;
4582 u16 mod_abs, rx_alarm_status;
4583 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
4584 u32 val = REG_RD(bp, params->shmem_base +
4585 offsetof(struct shmem_region, dev_info.
4586 port_feature_config[params->port].
4587 config));
4588 bnx2x_cl45_read(bp, params->port,
4589 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4590 ext_phy_addr,
4591 MDIO_PMA_DEVAD,
4592 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
4593 if (mod_abs & (1<<8)) {
4594
4595 /* Module is absent */
4596 DP(NETIF_MSG_LINK, "MOD_ABS indication "
4597 "show module is absent\n");
4598
4599 /* 1. Set mod_abs to detect next module
4600 presence event
4601 2. Set EDC off by setting OPTXLOS signal input to low
4602 (bit 9).
4603 When the EDC is off it locks onto a reference clock and
4604 avoids becoming 'lost'.*/
4605 mod_abs &= ~((1<<8)|(1<<9));
4606 bnx2x_cl45_write(bp, params->port,
4607 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4608 ext_phy_addr,
4609 MDIO_PMA_DEVAD,
4610 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4611
4612 /* Clear RX alarm since it stays up as long as
4613 mod_abs hasn't changed */
4614 bnx2x_cl45_read(bp, params->port,
4615 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4616 ext_phy_addr,
4617 MDIO_PMA_DEVAD,
4618 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4619
4620 } else {
4621 /* Module is present */
4622 DP(NETIF_MSG_LINK, "MOD_ABS indication "
4623 "show module is present\n");
4624 /* First thing, disable the transmitter;
4625 if the module is ok, the module detection
4626 will re-enable it */
4627
4628 /* 1. Set mod_abs to detect next module
4629 absent event ( bit 8)
4630 2. Restore the default polarity of the OPRXLOS signal and
4631 this signal will then correctly indicate the presence or
4632 absence of the Rx signal. (bit 9) */
4633 mod_abs |= ((1<<8)|(1<<9));
4634 bnx2x_cl45_write(bp, params->port,
4635 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4636 ext_phy_addr,
4637 MDIO_PMA_DEVAD,
4638 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4639
4640 /* Clear RX alarm since it stays up as long as
4641 mod_abs hasn't changed. This needs to be done
4642 before calling the module detection, otherwise it will clear
4643 the link update alarm */
4644 bnx2x_cl45_read(bp, params->port,
4645 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4646 ext_phy_addr,
4647 MDIO_PMA_DEVAD,
4648 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4649
4650
4651 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
4652 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
4653 bnx2x_sfp_set_transmitter(bp, params->port,
4654 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4655 ext_phy_addr, 0);
4656
4657 if (bnx2x_wait_for_sfp_module_initialized(params)
4658 == 0)
4659 bnx2x_sfp_module_detection(params);
4660 else
4661 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
4662 }
4663
4664 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
4665 rx_alarm_status);
4666 /* No need to check link status in case of
4667 module plugged in/out */
4668}
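/*
 * Editorial sketch (not part of the original driver): the handler above
 * toggles bits 8 and 9 of the 8727 PHY_IDENTIFIER register depending on the
 * module state: when the module is absent both bits are cleared (arm
 * detection of the next "present" event and keep EDC off), and when the
 * module is present both bits are set (arm detection of the next "absent"
 * event and restore the default OPRXLOS polarity). A minimal model of that
 * update; the helper name is illustrative.
 */
#if 0	/* illustration only */
static u16 bnx2x_8727_next_mod_abs(u16 mod_abs, u8 module_present)
{
	if (module_present)
		return mod_abs | ((1 << 8) | (1 << 9));
	return mod_abs & ~((1 << 8) | (1 << 9));
}
#endif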
4669
4670
4671static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
4672 struct link_vars *vars,
4673 u8 is_mi_int)
4674{
4675 struct bnx2x *bp = params->bp;
4676 u32 ext_phy_type;
4677 u8 ext_phy_addr;
4678 u16 val1 = 0, val2;
4679 u16 rx_sd, pcs_status;
4680 u8 ext_phy_link_up = 0;
4681 u8 port = params->port;
4682
4683 if (vars->phy_flags & PHY_XGXS_FLAG) {
4684 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
4685 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
4686 switch (ext_phy_type) {
4687 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
4688 DP(NETIF_MSG_LINK, "XGXS Direct\n");
4689 ext_phy_link_up = 1;
4690 break;
4691
4692 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
4693 DP(NETIF_MSG_LINK, "XGXS 8705\n");
4694 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4695 ext_phy_addr,
4696 MDIO_WIS_DEVAD,
4697 MDIO_WIS_REG_LASI_STATUS, &val1);
4698 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
4699
4700 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4701 ext_phy_addr,
4702 MDIO_WIS_DEVAD,
4703 MDIO_WIS_REG_LASI_STATUS, &val1);
4704 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
4705
4706 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4707 ext_phy_addr,
4708 MDIO_PMA_DEVAD,
4709 MDIO_PMA_REG_RX_SD, &rx_sd);
4710
4711 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4712 ext_phy_addr,
4713 1,
4714 0xc809, &val1);
4715 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4716 ext_phy_addr,
4717 1,
4718 0xc809, &val1);
4719
4720 DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
4721 ext_phy_link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) &&
4722 ((val1 & (1<<8)) == 0));
4723 if (ext_phy_link_up)
4724 vars->line_speed = SPEED_10000;
4725 break;
4726
4727 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
4728 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4729 DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
4730 /* Clear RX Alarm*/
4731 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4732 ext_phy_addr,
4733 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
4734 &val2);
4735 /* clear LASI indication*/
4736 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4737 ext_phy_addr,
4738 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
4739 &val1);
4740 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4741 ext_phy_addr,
4742 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
4743 &val2);
4744 DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x-->"
4745 "0x%x\n", val1, val2);
4746
4747 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4748 ext_phy_addr,
4749 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD,
4750 &rx_sd);
4751 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4752 ext_phy_addr,
4753 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS,
4754 &pcs_status);
4755 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4756 ext_phy_addr,
4757 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
4758 &val2);
4759 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4760 ext_phy_addr,
4761 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
4762 &val2);
4763
4764 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x"
4765 " pcs_status 0x%x 1Gbps link_status 0x%x\n",
4766 rx_sd, pcs_status, val2);
4767 /* link is up if both bit 0 of pmd_rx_sd and
4768 * bit 0 of pcs_status are set, or if the autoneg bit
4769 1 is set
4770 */
4771 ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
4772 (val2 & (1<<1)));
4773 if (ext_phy_link_up) {
4774 if (ext_phy_type ==
4775 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
4776 /* If transmitter is disabled,
4777 ignore false link up indication */
4778 bnx2x_cl45_read(bp, params->port,
4779 ext_phy_type,
4780 ext_phy_addr,
4781 MDIO_PMA_DEVAD,
4782 MDIO_PMA_REG_PHY_IDENTIFIER,
4783 &val1);
4784 if (val1 & (1<<15)) {
4785 DP(NETIF_MSG_LINK, "Tx is "
4786 "disabled\n");
4787 ext_phy_link_up = 0;
4788 break;
4789 }
4790 }
4791 if (val2 & (1<<1))
4792 vars->line_speed = SPEED_1000;
4793 else
4794 vars->line_speed = SPEED_10000;
4795 }
4796 break;
4797
4798 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4799 {
4800 u16 link_status = 0;
4801 u16 rx_alarm_status;
4802 /* Check the LASI */
4803 bnx2x_cl45_read(bp, params->port,
4804 ext_phy_type,
4805 ext_phy_addr,
4806 MDIO_PMA_DEVAD,
4807 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4808
4809 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
4810 rx_alarm_status);
4811
4812 bnx2x_cl45_read(bp, params->port,
4813 ext_phy_type,
4814 ext_phy_addr,
4815 MDIO_PMA_DEVAD,
4816 MDIO_PMA_REG_LASI_STATUS, &val1);
4817
4818 DP(NETIF_MSG_LINK,
4819 "8727 LASI status 0x%x\n",
4820 val1);
4821
4822 /* Clear MSG-OUT */
4823 bnx2x_cl45_read(bp, params->port,
4824 ext_phy_type,
4825 ext_phy_addr,
4826 MDIO_PMA_DEVAD,
4827 MDIO_PMA_REG_M8051_MSGOUT_REG,
4828 &val1);
4829
4830 /*
4831 * If a module is present and there is a need to check
4832 * for over-current
4833 */
4834 if (!(params->feature_config_flags &
4835 FEATURE_CONFIG_BCM8727_NOC) &&
4836 !(rx_alarm_status & (1<<5))) {
4837 /* Check over-current using 8727 GPIO0 input*/
4838 bnx2x_cl45_read(bp, params->port,
4839 ext_phy_type,
4840 ext_phy_addr,
4841 MDIO_PMA_DEVAD,
4842 MDIO_PMA_REG_8727_GPIO_CTRL,
4843 &val1);
4844
4845 if ((val1 & (1<<8)) == 0) {
4846 DP(NETIF_MSG_LINK, "8727 Power fault"
4847 " has been detected on "
4848 "port %d\n",
4849 params->port);
4850 netdev_err(bp->dev, "Error: Power fault on Port %d has been detected and the power to that SFP+ module has been removed to prevent failure of the card. Please remove the SFP+ module and restart the system to clear this error.\n",
4851 params->port);
4852 /*
4853 * Disable all RX_ALARMs except for
4854 * mod_abs
4855 */
4856 bnx2x_cl45_write(bp, params->port,
4857 ext_phy_type,
4858 ext_phy_addr,
4859 MDIO_PMA_DEVAD,
4860 MDIO_PMA_REG_RX_ALARM_CTRL,
4861 (1<<5));
4862
4863 bnx2x_cl45_read(bp, params->port,
4864 ext_phy_type,
4865 ext_phy_addr,
4866 MDIO_PMA_DEVAD,
4867 MDIO_PMA_REG_PHY_IDENTIFIER,
4868 &val1);
4869 /* Wait for module_absent_event */
4870 val1 |= (1<<8);
4871 bnx2x_cl45_write(bp, params->port,
4872 ext_phy_type,
4873 ext_phy_addr,
4874 MDIO_PMA_DEVAD,
4875 MDIO_PMA_REG_PHY_IDENTIFIER,
4876 val1);
4877 /* Clear RX alarm */
4878 bnx2x_cl45_read(bp, params->port,
4879 ext_phy_type,
4880 ext_phy_addr,
4881 MDIO_PMA_DEVAD,
4882 MDIO_PMA_REG_RX_ALARM,
4883 &rx_alarm_status);
4884 break;
4885 }
4886 } /* Over current check */
4887
4888 /* When module absent bit is set, check module */
4889 if (rx_alarm_status & (1<<5)) {
4890 bnx2x_8727_handle_mod_abs(params);
4891 /* Enable all mod_abs and link detection bits */
4892 bnx2x_cl45_write(bp, params->port,
4893 ext_phy_type,
4894 ext_phy_addr,
4895 MDIO_PMA_DEVAD,
4896 MDIO_PMA_REG_RX_ALARM_CTRL,
4897 ((1<<5) | (1<<2)));
4898 }
4899
4900 /* If transmitter is disabled,
4901 ignore false link up indication */
4902 bnx2x_cl45_read(bp, params->port,
4903 ext_phy_type,
4904 ext_phy_addr,
4905 MDIO_PMA_DEVAD,
4906 MDIO_PMA_REG_PHY_IDENTIFIER,
4907 &val1);
4908 if (val1 & (1<<15)) {
4909 DP(NETIF_MSG_LINK, "Tx is disabled\n");
4910 ext_phy_link_up = 0;
4911 break;
4912 }
4913
4914 bnx2x_cl45_read(bp, params->port,
4915 ext_phy_type,
4916 ext_phy_addr,
4917 MDIO_PMA_DEVAD,
4918 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
4919 &link_status);
4920
4921 /* Bits 0..2 --> speed detected,
4922 bits 13..15--> link is down */
4923 if ((link_status & (1<<2)) &&
4924 (!(link_status & (1<<15)))) {
4925 ext_phy_link_up = 1;
4926 vars->line_speed = SPEED_10000;
4927 } else if ((link_status & (1<<0)) &&
4928 (!(link_status & (1<<13)))) {
4929 ext_phy_link_up = 1;
4930 vars->line_speed = SPEED_1000;
4931 DP(NETIF_MSG_LINK,
4932 "port %x: External link"
4933 " up in 1G\n", params->port);
4934 } else {
4935 ext_phy_link_up = 0;
4936 DP(NETIF_MSG_LINK,
4937 "port %x: External link"
4938 " is down\n", params->port);
4939 }
4940 break;
4941 }
4942
4943 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4944 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4945 {
4946 u16 link_status = 0;
4947 u16 an1000_status = 0;
4948
4949 if (ext_phy_type ==
4950 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
4951 bnx2x_cl45_read(bp, params->port,
4952 ext_phy_type,
4953 ext_phy_addr,
4954 MDIO_PCS_DEVAD,
4955 MDIO_PCS_REG_LASI_STATUS, &val1);
4956 bnx2x_cl45_read(bp, params->port,
4957 ext_phy_type,
4958 ext_phy_addr,
4959 MDIO_PCS_DEVAD,
4960 MDIO_PCS_REG_LASI_STATUS, &val2);
4961 DP(NETIF_MSG_LINK,
4962 "870x LASI status 0x%x->0x%x\n",
4963 val1, val2);
4964 } else {
4965 /* In 8073, port1 is directed through emac0 and
4966 * port0 is directed through emac1
4967 */
4968 bnx2x_cl45_read(bp, params->port,
4969 ext_phy_type,
4970 ext_phy_addr,
4971 MDIO_PMA_DEVAD,
4972 MDIO_PMA_REG_LASI_STATUS, &val1);
4973
4974 DP(NETIF_MSG_LINK,
4975 "8073 LASI status 0x%x\n",
4976 val1);
4977 }
4978
4979 /* clear the interrupt LASI status register */
4980 bnx2x_cl45_read(bp, params->port,
4981 ext_phy_type,
4982 ext_phy_addr,
4983 MDIO_PCS_DEVAD,
4984 MDIO_PCS_REG_STATUS, &val2);
4985 bnx2x_cl45_read(bp, params->port,
4986 ext_phy_type,
4987 ext_phy_addr,
4988 MDIO_PCS_DEVAD,
4989 MDIO_PCS_REG_STATUS, &val1);
4990 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n",
4991 val2, val1);
4992 /* Clear MSG-OUT */
4993 bnx2x_cl45_read(bp, params->port,
4994 ext_phy_type,
4995 ext_phy_addr,
4996 MDIO_PMA_DEVAD,
4997 MDIO_PMA_REG_M8051_MSGOUT_REG,
4998 &val1);
4999
5000 /* Check the LASI */
5001 bnx2x_cl45_read(bp, params->port,
5002 ext_phy_type,
5003 ext_phy_addr,
5004 MDIO_PMA_DEVAD,
5005 MDIO_PMA_REG_RX_ALARM, &val2);
5006
5007 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
5008
5009 /* Check the link status */
5010 bnx2x_cl45_read(bp, params->port,
5011 ext_phy_type,
5012 ext_phy_addr,
5013 MDIO_PCS_DEVAD,
5014 MDIO_PCS_REG_STATUS, &val2);
5015 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
5016
5017 bnx2x_cl45_read(bp, params->port,
5018 ext_phy_type,
5019 ext_phy_addr,
5020 MDIO_PMA_DEVAD,
5021 MDIO_PMA_REG_STATUS, &val2);
5022 bnx2x_cl45_read(bp, params->port,
5023 ext_phy_type,
5024 ext_phy_addr,
5025 MDIO_PMA_DEVAD,
5026 MDIO_PMA_REG_STATUS, &val1);
5027 ext_phy_link_up = ((val1 & 4) == 4);
5028 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
5029 if (ext_phy_type ==
5030 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
5031
5032 if (ext_phy_link_up &&
5033 ((params->req_line_speed !=
5034 SPEED_10000))) {
5035 if (bnx2x_bcm8073_xaui_wa(params)
5036 != 0) {
5037 ext_phy_link_up = 0;
5038 break;
5039 }
5040 }
5041 bnx2x_cl45_read(bp, params->port,
5042 ext_phy_type,
5043 ext_phy_addr,
5044 MDIO_AN_DEVAD,
5045 MDIO_AN_REG_LINK_STATUS,
5046 &an1000_status);
5047 bnx2x_cl45_read(bp, params->port,
5048 ext_phy_type,
5049 ext_phy_addr,
5050 MDIO_AN_DEVAD,
5051 MDIO_AN_REG_LINK_STATUS,
5052 &an1000_status);
5053
5054 /* Check the link status on 1.1.2 */
5055 bnx2x_cl45_read(bp, params->port,
5056 ext_phy_type,
5057 ext_phy_addr,
5058 MDIO_PMA_DEVAD,
5059 MDIO_PMA_REG_STATUS, &val2);
5060 bnx2x_cl45_read(bp, params->port,
5061 ext_phy_type,
5062 ext_phy_addr,
5063 MDIO_PMA_DEVAD,
5064 MDIO_PMA_REG_STATUS, &val1);
5065 DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
5066 "an_link_status=0x%x\n",
5067 val2, val1, an1000_status);
5068
5069 ext_phy_link_up = (((val1 & 4) == 4) ||
5070 (an1000_status & (1<<1)));
5071 if (ext_phy_link_up &&
5072 bnx2x_8073_is_snr_needed(params)) {
5073 /* The SNR will improve by about 2 dB by
5074 changing the BW and FFE main tap. */
5075
5076 /* The 1st write to change the FFE main
5077 tap is issued before restarting AN */
5078 /* Change PLL Bandwidth in EDC
5079 register */
5080 bnx2x_cl45_write(bp, port, ext_phy_type,
5081 ext_phy_addr,
5082 MDIO_PMA_DEVAD,
5083 MDIO_PMA_REG_PLL_BANDWIDTH,
5084 0x26BC);
5085
5086 /* Change CDR Bandwidth in EDC
5087 register */
5088 bnx2x_cl45_write(bp, port, ext_phy_type,
5089 ext_phy_addr,
5090 MDIO_PMA_DEVAD,
5091 MDIO_PMA_REG_CDR_BANDWIDTH,
5092 0x0333);
5093 }
5094 bnx2x_cl45_read(bp, params->port,
5095 ext_phy_type,
5096 ext_phy_addr,
5097 MDIO_PMA_DEVAD,
5098 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
5099 &link_status);
5100
5101 /* Bits 0..2 --> speed detected,
5102 bits 13..15--> link is down */
5103 if ((link_status & (1<<2)) &&
5104 (!(link_status & (1<<15)))) {
5105 ext_phy_link_up = 1;
5106 vars->line_speed = SPEED_10000;
5107 DP(NETIF_MSG_LINK,
5108 "port %x: External link"
5109 " up in 10G\n", params->port);
5110 } else if ((link_status & (1<<1)) &&
5111 (!(link_status & (1<<14)))) {
5112 ext_phy_link_up = 1;
5113 vars->line_speed = SPEED_2500;
5114 DP(NETIF_MSG_LINK,
5115 "port %x: External link"
5116 " up in 2.5G\n", params->port);
5117 } else if ((link_status & (1<<0)) &&
5118 (!(link_status & (1<<13)))) {
5119 ext_phy_link_up = 1;
5120 vars->line_speed = SPEED_1000;
5121 DP(NETIF_MSG_LINK,
5122 "port %x: External link"
5123 " up in 1G\n", params->port);
5124 } else {
5125 ext_phy_link_up = 0;
5126 DP(NETIF_MSG_LINK,
5127 "port %x: External link"
5128 " is down\n", params->port);
5129 }
5130 } else {
5131 /* See if 1G link is up for the 8072 */
5132 bnx2x_cl45_read(bp, params->port,
5133 ext_phy_type,
5134 ext_phy_addr,
5135 MDIO_AN_DEVAD,
5136 MDIO_AN_REG_LINK_STATUS,
5137 &an1000_status);
5138 bnx2x_cl45_read(bp, params->port,
5139 ext_phy_type,
5140 ext_phy_addr,
5141 MDIO_AN_DEVAD,
5142 MDIO_AN_REG_LINK_STATUS,
5143 &an1000_status);
5144 if (an1000_status & (1<<1)) {
5145 ext_phy_link_up = 1;
5146 vars->line_speed = SPEED_1000;
5147 DP(NETIF_MSG_LINK,
5148 "port %x: External link"
5149 " up in 1G\n", params->port);
5150 } else if (ext_phy_link_up) {
5151 ext_phy_link_up = 1;
5152 vars->line_speed = SPEED_10000;
5153 DP(NETIF_MSG_LINK,
5154 "port %x: External link"
5155 " up in 10G\n", params->port);
5156 }
5157 }
5158
5159
5160 break;
5161 }
5162 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5163 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5164 ext_phy_addr,
5165 MDIO_PMA_DEVAD,
5166 MDIO_PMA_REG_LASI_STATUS, &val2);
5167 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5168 ext_phy_addr,
5169 MDIO_PMA_DEVAD,
5170 MDIO_PMA_REG_LASI_STATUS, &val1);
5171 DP(NETIF_MSG_LINK,
5172 "10G-base-T LASI status 0x%x->0x%x\n",
5173 val2, val1);
5174 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5175 ext_phy_addr,
5176 MDIO_PMA_DEVAD,
5177 MDIO_PMA_REG_STATUS, &val2);
5178 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5179 ext_phy_addr,
5180 MDIO_PMA_DEVAD,
5181 MDIO_PMA_REG_STATUS, &val1);
5182 DP(NETIF_MSG_LINK,
5183 "10G-base-T PMA status 0x%x->0x%x\n",
5184 val2, val1);
5185 ext_phy_link_up = ((val1 & 4) == 4);
5186 /* if link is up
5187 * print the AN outcome of the SFX7101 PHY
5188 */
5189 if (ext_phy_link_up) {
5190 bnx2x_cl45_read(bp, params->port,
5191 ext_phy_type,
5192 ext_phy_addr,
5193 MDIO_AN_DEVAD,
5194 MDIO_AN_REG_MASTER_STATUS,
5195 &val2);
5196 vars->line_speed = SPEED_10000;
5197 DP(NETIF_MSG_LINK,
5198 "SFX7101 AN status 0x%x->Master=%x\n",
5199 val2,
5200 (val2 & (1<<14)));
5201 }
5202 break;
5203 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5204 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5205 /* Check 10G-BaseT link status */
5206 /* Check PMD signal ok */
5207 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5208 ext_phy_addr,
5209 MDIO_AN_DEVAD,
5210 0xFFFA,
5211 &val1);
5212 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5213 ext_phy_addr,
5214 MDIO_PMA_DEVAD,
5215 MDIO_PMA_REG_8481_PMD_SIGNAL,
5216 &val2);
5217 DP(NETIF_MSG_LINK, "PMD_SIGNAL 1.a811 = 0x%x\n", val2);
5218
5219 /* Check link 10G */
5220 if (val2 & (1<<11)) {
5221 vars->line_speed = SPEED_10000;
5222 ext_phy_link_up = 1;
5223 bnx2x_8481_set_10G_led_mode(params,
5224 ext_phy_type,
5225 ext_phy_addr);
5226 } else { /* Check Legacy speed link */
5227 u16 legacy_status, legacy_speed;
5228
5229 /* Enable expansion register 0x42
5230 (Operation mode status) */
5231 bnx2x_cl45_write(bp, params->port,
5232 ext_phy_type,
5233 ext_phy_addr,
5234 MDIO_AN_DEVAD,
5235 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS,
5236 0xf42);
5237
5238 /* Get legacy speed operation status */
5239 bnx2x_cl45_read(bp, params->port,
5240 ext_phy_type,
5241 ext_phy_addr,
5242 MDIO_AN_DEVAD,
5243 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
5244 &legacy_status);
5245
5246 DP(NETIF_MSG_LINK, "Legacy speed status"
5247 " = 0x%x\n", legacy_status);
5248 ext_phy_link_up = ((legacy_status & (1<<11))
5249 == (1<<11));
5250 if (ext_phy_link_up) {
5251 legacy_speed = (legacy_status & (3<<9));
5252 if (legacy_speed == (0<<9))
5253 vars->line_speed = SPEED_10;
5254 else if (legacy_speed == (1<<9))
5255 vars->line_speed =
5256 SPEED_100;
5257 else if (legacy_speed == (2<<9))
5258 vars->line_speed =
5259 SPEED_1000;
5260 else /* Should not happen */
5261 vars->line_speed = 0;
5262
5263 if (legacy_status & (1<<8))
5264 vars->duplex = DUPLEX_FULL;
5265 else
5266 vars->duplex = DUPLEX_HALF;
5267
5268 DP(NETIF_MSG_LINK, "Link is up "
5269 "in %dMbps, is_duplex_full"
5270 "= %d\n",
5271 vars->line_speed,
5272 (vars->duplex == DUPLEX_FULL));
5273 bnx2x_8481_set_legacy_led_mode(params,
5274 ext_phy_type,
5275 ext_phy_addr);
5276 }
5277 }
5278 break;
5279 default:
5280 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
5281 params->ext_phy_config);
5282 ext_phy_link_up = 0;
5283 break;
5284 }
5285 /* Set SGMII mode for external phy */
5286 if (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
5287 if (vars->line_speed < SPEED_1000)
5288 vars->phy_flags |= PHY_SGMII_FLAG;
5289 else
5290 vars->phy_flags &= ~PHY_SGMII_FLAG;
5291 }
5292
5293 } else { /* SerDes */
5294 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
5295 switch (ext_phy_type) {
5296 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
5297 DP(NETIF_MSG_LINK, "SerDes Direct\n");
5298 ext_phy_link_up = 1;
5299 break;
5300
5301 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
5302 DP(NETIF_MSG_LINK, "SerDes 5482\n");
5303 ext_phy_link_up = 1;
5304 break;
5305
5306 default:
5307 DP(NETIF_MSG_LINK,
5308 "BAD SerDes ext_phy_config 0x%x\n",
5309 params->ext_phy_config);
5310 ext_phy_link_up = 0;
5311 break;
5312 }
5313 }
5314
5315 return ext_phy_link_up;
5316}
5317
5318static void bnx2x_link_int_enable(struct link_params *params)
5319{
5320 u8 port = params->port;
5321 u32 ext_phy_type;
5322 u32 mask;
5323 struct bnx2x *bp = params->bp;
5324
5325 /* setting the status to report on link up
5326 for either XGXS or SerDes */
5327
5328 if (params->switch_cfg == SWITCH_CFG_10G) {
5329 mask = (NIG_MASK_XGXS0_LINK10G |
5330 NIG_MASK_XGXS0_LINK_STATUS);
5331 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
5332 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5333 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
5334 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
5335 (ext_phy_type !=
5336 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
5337 mask |= NIG_MASK_MI_INT;
5338 DP(NETIF_MSG_LINK, "enabled external phy int\n");
5339 }
5340
5341 } else { /* SerDes */
5342 mask = NIG_MASK_SERDES0_LINK_STATUS;
5343 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
5344 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
5345 if ((ext_phy_type !=
5346 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
5347 (ext_phy_type !=
5348 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
5349 mask |= NIG_MASK_MI_INT;
5350 DP(NETIF_MSG_LINK, "enabled external phy int\n");
5351 }
5352 }
5353 bnx2x_bits_en(bp,
5354 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
5355 mask);
5356
5357 DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
5358 (params->switch_cfg == SWITCH_CFG_10G),
5359 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
5360 DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
5361 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
5362 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
5363 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
5364 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
5365 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
5366 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
5367}
5368
5369static void bnx2x_8481_rearm_latch_signal(struct bnx2x *bp, u8 port,
5370 u8 is_mi_int)
5371{
5372 u32 latch_status = 0, is_mi_int_status;
5373 /* Disable the MI INT ( external phy int )
5374 * by writing 1 to the status register. Link down indication
5375	 * is an active-high signal, so in this case we need to write the
5376 * status to clear the XOR
5377 */
5378 /* Read Latched signals */
5379 latch_status = REG_RD(bp,
5380 NIG_REG_LATCH_STATUS_0 + port*8);
5381 is_mi_int_status = REG_RD(bp,
5382 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4);
5383 DP(NETIF_MSG_LINK, "original_signal = 0x%x, nig_status = 0x%x,"
5384 "latch_status = 0x%x\n",
5385 is_mi_int, is_mi_int_status, latch_status);
5386 /* Handle only those with latched-signal=up.*/
5387 if (latch_status & 1) {
5388		/* For all latched-signal=up, write original_signal to status */
5389 if (is_mi_int)
5390 bnx2x_bits_en(bp,
5391 NIG_REG_STATUS_INTERRUPT_PORT0
5392 + port*4,
5393 NIG_STATUS_EMAC0_MI_INT);
5394 else
5395 bnx2x_bits_dis(bp,
5396 NIG_REG_STATUS_INTERRUPT_PORT0
5397 + port*4,
5398 NIG_STATUS_EMAC0_MI_INT);
5399 /* For all latched-signal=up : Re-Arm Latch signals */
5400 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
5401 (latch_status & 0xfffe) | (latch_status & 1));
5402 }
5403}
5404/*
5405 * link management
5406 */
5407static void bnx2x_link_int_ack(struct link_params *params,
5408 struct link_vars *vars, u8 is_10g,
5409 u8 is_mi_int)
5410{
5411 struct bnx2x *bp = params->bp;
5412 u8 port = params->port;
5413
5414	/* first reset all status
5415	 * we assume only one line will be changed at a time */
5416 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5417 (NIG_STATUS_XGXS0_LINK10G |
5418 NIG_STATUS_XGXS0_LINK_STATUS |
5419 NIG_STATUS_SERDES0_LINK_STATUS));
5420 if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5421 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) ||
5422 (XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5423 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823)) {
5424 bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int);
5425 }
5426 if (vars->phy_link_up) {
5427 if (is_10g) {
5428 /* Disable the 10G link interrupt
5429 * by writing 1 to the status register
5430 */
5431 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
5432 bnx2x_bits_en(bp,
5433 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5434 NIG_STATUS_XGXS0_LINK10G);
5435
5436 } else if (params->switch_cfg == SWITCH_CFG_10G) {
5437 /* Disable the link interrupt
5438 * by writing 1 to the relevant lane
5439 * in the status register
5440 */
5441 u32 ser_lane = ((params->lane_config &
5442 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
5443 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
5444
5445 DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n",
5446 vars->line_speed);
5447 bnx2x_bits_en(bp,
5448 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5449 ((1 << ser_lane) <<
5450 NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
5451
5452 } else { /* SerDes */
5453 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
5454 /* Disable the link interrupt
5455 * by writing 1 to the status register
5456 */
5457 bnx2x_bits_en(bp,
5458 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5459 NIG_STATUS_SERDES0_LINK_STATUS);
5460 }
5461
5462 } else { /* link_down */
5463 }
5464}
5465
5466static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len)
5467{
5468 u8 *str_ptr = str;
5469 u32 mask = 0xf0000000;
5470 u8 shift = 8*4;
5471 u8 digit;
5472 if (len < 10) {
5473		/* Need more than 10 chars for this format */
5474 *str_ptr = '\0';
5475 return -EINVAL;
5476 }
5477 while (shift > 0) {
5478
5479 shift -= 4;
5480 digit = ((num & mask) >> shift);
5481 if (digit < 0xa)
5482 *str_ptr = digit + '0';
5483 else
5484 *str_ptr = digit - 0xa + 'a';
5485 str_ptr++;
5486 mask = mask >> 4;
5487 if (shift == 4*4) {
5488 *str_ptr = ':';
5489 str_ptr++;
5490 }
5491 }
5492 *str_ptr = '\0';
5493 return 0;
5494}
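/* For example, a spirom version of 0x12345678 is rendered as "1234:5678":
 * lower-case hex with a ':' between the two 16-bit halves (9 characters
 * plus the terminating NUL, hence the len < 10 check above).
 */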
5495
5496u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
5497 u8 *version, u16 len)
5498{
5499 struct bnx2x *bp;
5500 u32 ext_phy_type = 0;
5501 u32 spirom_ver = 0;
5502 u8 status;
5503
5504 if (version == NULL || params == NULL)
5505 return -EINVAL;
5506 bp = params->bp;
5507
5508 spirom_ver = REG_RD(bp, params->shmem_base +
5509 offsetof(struct shmem_region,
5510 port_mb[params->port].ext_phy_fw_version));
5511
5512 status = 0;
5513 /* reset the returned value to zero */
5514 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5515 switch (ext_phy_type) {
5516 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5517
5518 if (len < 5)
5519 return -EINVAL;
5520
5521 version[0] = (spirom_ver & 0xFF);
5522 version[1] = (spirom_ver & 0xFF00) >> 8;
5523 version[2] = (spirom_ver & 0xFF0000) >> 16;
5524 version[3] = (spirom_ver & 0xFF000000) >> 24;
5525 version[4] = '\0';
5526
5527 break;
5528 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5529 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5530 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
5531 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
5532 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5533 status = bnx2x_format_ver(spirom_ver, version, len);
5534 break;
5535 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5536 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5537 spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 |
5538 (spirom_ver & 0x7F);
5539 status = bnx2x_format_ver(spirom_ver, version, len);
5540 break;
5541 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
5542 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
5543 version[0] = '\0';
5544 break;
5545
5546 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
5547 DP(NETIF_MSG_LINK, "bnx2x_get_ext_phy_fw_version:"
5548 " type is FAILURE!\n");
5549 status = -EINVAL;
5550 break;
5551
5552 default:
5553 break;
5554 }
5555 return status;
5556}
5557
5558static void bnx2x_set_xgxs_loopback(struct link_params *params,
5559 struct link_vars *vars,
5560 u8 is_10g)
5561{
5562 u8 port = params->port;
5563 struct bnx2x *bp = params->bp;
5564
5565 if (is_10g) {
5566 u32 md_devad;
5567
5568 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
5569
5570 /* change the uni_phy_addr in the nig */
5571 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
5572 port*0x18));
5573
5574 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
5575
5576 bnx2x_cl45_write(bp, port, 0,
5577 params->phy_addr,
5578 5,
5579 (MDIO_REG_BANK_AER_BLOCK +
5580 (MDIO_AER_BLOCK_AER_REG & 0xf)),
5581 0x2800);
5582
5583 bnx2x_cl45_write(bp, port, 0,
5584 params->phy_addr,
5585 5,
5586 (MDIO_REG_BANK_CL73_IEEEB0 +
5587 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
5588 0x6041);
5589 msleep(200);
5590 /* set aer mmd back */
5591 bnx2x_set_aer_mmd(params, vars);
5592
5593 /* and md_devad */
5594 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
5595 md_devad);
5596
5597 } else {
5598 u16 mii_control;
5599
5600 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
5601
5602 CL45_RD_OVER_CL22(bp, port,
5603 params->phy_addr,
5604 MDIO_REG_BANK_COMBO_IEEE0,
5605 MDIO_COMBO_IEEE0_MII_CONTROL,
5606 &mii_control);
5607
5608 CL45_WR_OVER_CL22(bp, port,
5609 params->phy_addr,
5610 MDIO_REG_BANK_COMBO_IEEE0,
5611 MDIO_COMBO_IEEE0_MII_CONTROL,
5612 (mii_control |
5613 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
5614 }
5615}
5616
5617
5618static void bnx2x_ext_phy_loopback(struct link_params *params)
5619{
5620 struct bnx2x *bp = params->bp;
5621 u8 ext_phy_addr;
5622 u32 ext_phy_type;
5623
5624 if (params->switch_cfg == SWITCH_CFG_10G) {
5625 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5626 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
5627 /* CL37 Autoneg Enabled */
5628 switch (ext_phy_type) {
5629 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
5630 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN:
5631 DP(NETIF_MSG_LINK,
5632 "ext_phy_loopback: We should not get here\n");
5633 break;
5634 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
5635 DP(NETIF_MSG_LINK, "ext_phy_loopback: 8705\n");
5636 break;
5637 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
5638 DP(NETIF_MSG_LINK, "ext_phy_loopback: 8706\n");
5639 break;
5640 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5641 DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
5642 bnx2x_cl45_write(bp, params->port, ext_phy_type,
5643 ext_phy_addr,
5644 MDIO_PMA_DEVAD,
5645 MDIO_PMA_REG_CTRL,
5646 0x0001);
5647 break;
5648 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5649 /* SFX7101_XGXS_TEST1 */
5650 bnx2x_cl45_write(bp, params->port, ext_phy_type,
5651 ext_phy_addr,
5652 MDIO_XS_DEVAD,
5653 MDIO_XS_SFX7101_XGXS_TEST1,
5654 0x100);
5655 DP(NETIF_MSG_LINK,
5656 "ext_phy_loopback: set ext phy loopback\n");
5657 break;
5658 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5659
5660 break;
5661 } /* switch external PHY type */
5662 } else {
5663 /* serdes */
5664 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
5665 ext_phy_addr = (params->ext_phy_config &
5666 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK)
5667 >> PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT;
5668 }
5669}
5670
5671
5672/*
5673 *------------------------------------------------------------------------
5674 * bnx2x_override_led_value -
5675 *
5676 * Override the led value of the requested led
5677 *
5678 *------------------------------------------------------------------------
5679 */
5680u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
5681 u32 led_idx, u32 value)
5682{
5683 u32 reg_val;
5684
5685 /* If port 0 then use EMAC0, else use EMAC1*/
5686 u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5687
5688 DP(NETIF_MSG_LINK,
5689 "bnx2x_override_led_value() port %x led_idx %d value %d\n",
5690 port, led_idx, value);
5691
5692 switch (led_idx) {
5693 case 0: /* 10MB led */
5694 /* Read the current value of the LED register in
5695 the EMAC block */
5696 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5697 /* Set the OVERRIDE bit to 1 */
5698 reg_val |= EMAC_LED_OVERRIDE;
5699 /* If value is 1, set the 10M_OVERRIDE bit,
5700 otherwise reset it.*/
5701 reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
5702 (reg_val & ~EMAC_LED_10MB_OVERRIDE);
5703 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5704 break;
5705 case 1: /*100MB led */
5706 /*Read the current value of the LED register in
5707 the EMAC block */
5708 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5709 /* Set the OVERRIDE bit to 1 */
5710 reg_val |= EMAC_LED_OVERRIDE;
5711 /* If value is 1, set the 100M_OVERRIDE bit,
5712 otherwise reset it.*/
5713 reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
5714 (reg_val & ~EMAC_LED_100MB_OVERRIDE);
5715 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5716 break;
5717 case 2: /* 1000MB led */
5718 /* Read the current value of the LED register in the
5719 EMAC block */
5720 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5721 /* Set the OVERRIDE bit to 1 */
5722 reg_val |= EMAC_LED_OVERRIDE;
5723 /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
5724 reset it. */
5725 reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
5726 (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
5727 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5728 break;
5729 case 3: /* 2500MB led */
5730 /* Read the current value of the LED register in the
5731 EMAC block*/
5732 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5733 /* Set the OVERRIDE bit to 1 */
5734 reg_val |= EMAC_LED_OVERRIDE;
5735 /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
5736 reset it.*/
5737 reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
5738 (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
5739 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5740 break;
5741 case 4: /*10G led */
5742 if (port == 0) {
5743 REG_WR(bp, NIG_REG_LED_10G_P0,
5744 value);
5745 } else {
5746 REG_WR(bp, NIG_REG_LED_10G_P1,
5747 value);
5748 }
5749 break;
5750 case 5: /* TRAFFIC led */
5751 /* Find if the traffic control is via BMAC or EMAC */
5752 if (port == 0)
5753 reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
5754 else
5755 reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
5756
5757 /* Override the traffic led in the EMAC:*/
5758 if (reg_val == 1) {
5759 /* Read the current value of the LED register in
5760 the EMAC block */
5761 reg_val = REG_RD(bp, emac_base +
5762 EMAC_REG_EMAC_LED);
5763 /* Set the TRAFFIC_OVERRIDE bit to 1 */
5764 reg_val |= EMAC_LED_OVERRIDE;
5765 /* If value is 1, set the TRAFFIC bit, otherwise
5766 reset it.*/
5767 reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
5768 (reg_val & ~EMAC_LED_TRAFFIC);
5769 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5770 } else { /* Override the traffic led in the BMAC: */
5771 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
5772 + port*4, 1);
5773 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
5774 value);
5775 }
5776 break;
5777 default:
5778 DP(NETIF_MSG_LINK,
5779 "bnx2x_override_led_value() unknown led index %d "
5780 "(should be 0-5)\n", led_idx);
5781 return -EINVAL;
5782 }
5783
5784 return 0;
5785}
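/* Usage sketch (illustrative only): drive the 10G LED (led_idx 4) of
 * port 0 on and then off again:
 *
 *	bnx2x_override_led_value(bp, 0, 4, 1);
 *	bnx2x_override_led_value(bp, 0, 4, 0);
 */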
5786
5787
5788u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed)
5789{
5790 u8 port = params->port;
5791 u16 hw_led_mode = params->hw_led_mode;
5792 u8 rc = 0;
5793 u32 tmp;
5794 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5795 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5796 struct bnx2x *bp = params->bp;
5797 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
5798 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
5799 speed, hw_led_mode);
5800 switch (mode) {
5801 case LED_MODE_OFF:
5802 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
5803 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
5804 SHARED_HW_CFG_LED_MAC1);
5805
5806 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
5807 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
5808 break;
5809
5810 case LED_MODE_OPER:
5811 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
5812 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
5813 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
5814 } else {
5815 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
5816 hw_led_mode);
5817 }
5818
5819 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
5820 port*4, 0);
5821 /* Set blinking rate to ~15.9Hz */
5822 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
5823 LED_BLINK_RATE_VAL);
5824 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
5825 port*4, 1);
5826 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
5827 EMAC_WR(bp, EMAC_REG_EMAC_LED,
5828 (tmp & (~EMAC_LED_OVERRIDE)));
5829
5830 if (CHIP_IS_E1(bp) &&
5831 ((speed == SPEED_2500) ||
5832 (speed == SPEED_1000) ||
5833 (speed == SPEED_100) ||
5834 (speed == SPEED_10))) {
5835			/* On Everest 1 Ax chip versions, for speeds less than
5836			10G, the LED scheme is different */
5837 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
5838 + port*4, 1);
5839 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
5840 port*4, 0);
5841 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
5842 port*4, 1);
5843 }
5844 break;
5845
5846 default:
5847 rc = -EINVAL;
5848 DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
5849 mode);
5850 break;
5851 }
5852 return rc;
5853
5854}
5855
5856u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars)
5857{
5858 struct bnx2x *bp = params->bp;
5859 u16 gp_status = 0;
5860
5861 CL45_RD_OVER_CL22(bp, params->port,
5862 params->phy_addr,
5863 MDIO_REG_BANK_GP_STATUS,
5864 MDIO_GP_STATUS_TOP_AN_STATUS1,
5865 &gp_status);
5866 /* link is up only if both local phy and external phy are up */
5867 if ((gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) &&
5868 bnx2x_ext_phy_is_link_up(params, vars, 1))
5869 return 0;
5870
5871 return -ESRCH;
5872}
5873
5874static u8 bnx2x_link_initialize(struct link_params *params,
5875 struct link_vars *vars)
5876{
5877 struct bnx2x *bp = params->bp;
5878 u8 port = params->port;
5879 u8 rc = 0;
5880 u8 non_ext_phy;
5881 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5882
5883 /* Activate the external PHY */
5884 bnx2x_ext_phy_reset(params, vars);
5885
5886 bnx2x_set_aer_mmd(params, vars);
5887
5888 if (vars->phy_flags & PHY_XGXS_FLAG)
5889 bnx2x_set_master_ln(params);
5890
5891 rc = bnx2x_reset_unicore(params);
5892	/* reset the SerDes and wait for the reset bit to return low */
5893 if (rc != 0)
5894 return rc;
5895
5896 bnx2x_set_aer_mmd(params, vars);
5897
5898 /* setting the masterLn_def again after the reset */
5899 if (vars->phy_flags & PHY_XGXS_FLAG) {
5900 bnx2x_set_master_ln(params);
5901 bnx2x_set_swap_lanes(params);
5902 }
5903
5904 if (vars->phy_flags & PHY_XGXS_FLAG) {
5905 if ((params->req_line_speed &&
5906 ((params->req_line_speed == SPEED_100) ||
5907 (params->req_line_speed == SPEED_10))) ||
5908 (!params->req_line_speed &&
5909 (params->speed_cap_mask >=
5910 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
5911 (params->speed_cap_mask <
5912 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
5913 )) {
5914 vars->phy_flags |= PHY_SGMII_FLAG;
5915 } else {
5916 vars->phy_flags &= ~PHY_SGMII_FLAG;
5917 }
5918 }
5919	/* In case an external phy exists, the line speed will be the
5920	line speed the external phy links up at. In case it is direct only,
5921	then the line_speed during initialization will be equal to the
5922	req_line_speed*/
5923 vars->line_speed = params->req_line_speed;
5924
5925 bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc);
5926
5927 /* init ext phy and enable link state int */
5928 non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
5929 (params->loopback_mode == LOOPBACK_XGXS_10));
5930
5931 if (non_ext_phy ||
5932 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
5933 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
5934 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) ||
5935 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
5936 if (params->req_line_speed == SPEED_AUTO_NEG)
5937 bnx2x_set_parallel_detection(params, vars->phy_flags);
5938 bnx2x_init_internal_phy(params, vars, non_ext_phy);
5939 }
5940
5941 if (!non_ext_phy)
5942 rc |= bnx2x_ext_phy_init(params, vars);
5943
5944 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5945 (NIG_STATUS_XGXS0_LINK10G |
5946 NIG_STATUS_XGXS0_LINK_STATUS |
5947 NIG_STATUS_SERDES0_LINK_STATUS));
5948
5949 return rc;
5950
5951}
5952
5953
5954u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
5955{
5956 struct bnx2x *bp = params->bp;
5957 u32 val;
5958
5959 DP(NETIF_MSG_LINK, "Phy Initialization started\n");
5960 DP(NETIF_MSG_LINK, "req_speed %d, req_flowctrl %d\n",
5961 params->req_line_speed, params->req_flow_ctrl);
5962 vars->link_status = 0;
5963 vars->phy_link_up = 0;
5964 vars->link_up = 0;
5965 vars->line_speed = 0;
5966 vars->duplex = DUPLEX_FULL;
5967 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
5968 vars->mac_type = MAC_TYPE_NONE;
5969
5970 if (params->switch_cfg == SWITCH_CFG_1G)
5971 vars->phy_flags = PHY_SERDES_FLAG;
5972 else
5973 vars->phy_flags = PHY_XGXS_FLAG;
5974
5975 /* disable attentions */
5976 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
5977 (NIG_MASK_XGXS0_LINK_STATUS |
5978 NIG_MASK_XGXS0_LINK10G |
5979 NIG_MASK_SERDES0_LINK_STATUS |
5980 NIG_MASK_MI_INT));
5981
5982 bnx2x_emac_init(params, vars);
5983
5984 if (CHIP_REV_IS_FPGA(bp)) {
5985
5986 vars->link_up = 1;
5987 vars->line_speed = SPEED_10000;
5988 vars->duplex = DUPLEX_FULL;
5989 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
5990 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
5991 /* enable on E1.5 FPGA */
5992 if (CHIP_IS_E1H(bp)) {
5993 vars->flow_ctrl |=
5994 (BNX2X_FLOW_CTRL_TX |
5995 BNX2X_FLOW_CTRL_RX);
5996 vars->link_status |=
5997 (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
5998 LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
5999 }
6000
6001 bnx2x_emac_enable(params, vars, 0);
6002 bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
6003 /* disable drain */
6004 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
6005
6006 /* update shared memory */
6007 bnx2x_update_mng(params, vars->link_status);
6008
6009 return 0;
6010
6011 } else
6012 if (CHIP_REV_IS_EMUL(bp)) {
6013
6014 vars->link_up = 1;
6015 vars->line_speed = SPEED_10000;
6016 vars->duplex = DUPLEX_FULL;
6017 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6018 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
6019
6020 bnx2x_bmac_enable(params, vars, 0);
6021
6022 bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
6023 /* Disable drain */
6024 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
6025 + params->port*4, 0);
6026
6027 /* update shared memory */
6028 bnx2x_update_mng(params, vars->link_status);
6029
6030 return 0;
6031
6032 } else
6033 if (params->loopback_mode == LOOPBACK_BMAC) {
6034
6035 vars->link_up = 1;
6036 vars->line_speed = SPEED_10000;
6037 vars->duplex = DUPLEX_FULL;
6038 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6039 vars->mac_type = MAC_TYPE_BMAC;
6040
6041 vars->phy_flags = PHY_XGXS_FLAG;
6042
6043 bnx2x_phy_deassert(params, vars->phy_flags);
6044 /* set bmac loopback */
6045 bnx2x_bmac_enable(params, vars, 1);
6046
6047 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
6048 params->port*4, 0);
6049
6050 } else if (params->loopback_mode == LOOPBACK_EMAC) {
6051
6052 vars->link_up = 1;
6053 vars->line_speed = SPEED_1000;
6054 vars->duplex = DUPLEX_FULL;
6055 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6056 vars->mac_type = MAC_TYPE_EMAC;
6057
6058 vars->phy_flags = PHY_XGXS_FLAG;
6059
6060 bnx2x_phy_deassert(params, vars->phy_flags);
6061		/* set emac loopback */
6062 bnx2x_emac_enable(params, vars, 1);
6063 bnx2x_emac_program(params, vars->line_speed,
6064 vars->duplex);
6065 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
6066 params->port*4, 0);
6067
6068 } else if ((params->loopback_mode == LOOPBACK_XGXS_10) ||
6069 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
6070
6071 vars->link_up = 1;
6072 vars->line_speed = SPEED_10000;
6073 vars->duplex = DUPLEX_FULL;
6074 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6075
6076 vars->phy_flags = PHY_XGXS_FLAG;
6077
6078 val = REG_RD(bp,
6079 NIG_REG_XGXS0_CTRL_PHY_ADDR+
6080 params->port*0x18);
6081 params->phy_addr = (u8)val;
6082
6083 bnx2x_phy_deassert(params, vars->phy_flags);
6084 bnx2x_link_initialize(params, vars);
6085
6086 vars->mac_type = MAC_TYPE_BMAC;
6087
6088 bnx2x_bmac_enable(params, vars, 0);
6089
6090 if (params->loopback_mode == LOOPBACK_XGXS_10) {
6091 /* set 10G XGXS loopback */
6092 bnx2x_set_xgxs_loopback(params, vars, 1);
6093 } else {
6094 /* set external phy loopback */
6095 bnx2x_ext_phy_loopback(params);
6096 }
6097 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
6098 params->port*4, 0);
6099
6100 bnx2x_set_led(params, LED_MODE_OPER, vars->line_speed);
6101 } else
6102 /* No loopback */
6103 {
6104 bnx2x_phy_deassert(params, vars->phy_flags);
6105 switch (params->switch_cfg) {
6106 case SWITCH_CFG_1G:
6107 vars->phy_flags |= PHY_SERDES_FLAG;
6108 if ((params->ext_phy_config &
6109 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK) ==
6110 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482) {
6111 vars->phy_flags |= PHY_SGMII_FLAG;
6112 }
6113
6114 val = REG_RD(bp,
6115 NIG_REG_SERDES0_CTRL_PHY_ADDR+
6116 params->port*0x10);
6117
6118 params->phy_addr = (u8)val;
6119
6120 break;
6121 case SWITCH_CFG_10G:
6122 vars->phy_flags |= PHY_XGXS_FLAG;
6123 val = REG_RD(bp,
6124 NIG_REG_XGXS0_CTRL_PHY_ADDR+
6125 params->port*0x18);
6126 params->phy_addr = (u8)val;
6127
6128 break;
6129 default:
6130 DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
6131 return -EINVAL;
6132 }
6133 DP(NETIF_MSG_LINK, "Phy address = 0x%x\n", params->phy_addr);
6134
6135 bnx2x_link_initialize(params, vars);
6136 msleep(30);
6137 bnx2x_link_int_enable(params);
6138 }
6139 return 0;
6140}
6141
6142static void bnx2x_8726_reset_phy(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
6143{
6144 DP(NETIF_MSG_LINK, "bnx2x_8726_reset_phy port %d\n", port);
6145
6146 /* Set serial boot control for external load */
6147 bnx2x_cl45_write(bp, port,
6148 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, ext_phy_addr,
6149 MDIO_PMA_DEVAD,
6150 MDIO_PMA_REG_GEN_CTRL, 0x0001);
6151}
6152
6153u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6154 u8 reset_ext_phy)
6155{
6156 struct bnx2x *bp = params->bp;
6157 u32 ext_phy_config = params->ext_phy_config;
6158 u8 port = params->port;
6159 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6160 u32 val = REG_RD(bp, params->shmem_base +
6161 offsetof(struct shmem_region, dev_info.
6162 port_feature_config[params->port].
6163 config));
6164 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
6165 /* disable attentions */
6166 vars->link_status = 0;
6167 bnx2x_update_mng(params, vars->link_status);
6168 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
6169 (NIG_MASK_XGXS0_LINK_STATUS |
6170 NIG_MASK_XGXS0_LINK10G |
6171 NIG_MASK_SERDES0_LINK_STATUS |
6172 NIG_MASK_MI_INT));
6173
6174 /* activate nig drain */
6175 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
6176
6177 /* disable nig egress interface */
6178 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
6179 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
6180
6181 /* Stop BigMac rx */
6182 bnx2x_bmac_rx_disable(bp, port);
6183
6184 /* disable emac */
6185 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6186
6187 msleep(10);
6188	/* The PHY reset is controlled by GPIO 1.
6189	 * Hold it low
6190	 */
6191 /* clear link led */
6192 bnx2x_set_led(params, LED_MODE_OFF, 0);
6193 if (reset_ext_phy) {
6194 switch (ext_phy_type) {
6195 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6196 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6197 break;
6198
6199 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6200 {
6201
6202 /* Disable Transmitter */
6203 u8 ext_phy_addr =
6204 XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6205 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
6206 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
6207 bnx2x_sfp_set_transmitter(bp, port,
6208 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
6209 ext_phy_addr, 0);
6210 break;
6211 }
6212 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6213 DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
6214 "low power mode\n",
6215 port);
6216 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6217 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6218 port);
6219 break;
6220 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6221 {
6222 u8 ext_phy_addr =
6223 XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6224 /* Set soft reset */
6225 bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr);
6226 break;
6227 }
6228 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6229 {
6230 u8 ext_phy_addr =
6231 XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6232 bnx2x_cl45_write(bp, port,
6233 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6234 ext_phy_addr,
6235 MDIO_AN_DEVAD,
6236 MDIO_AN_REG_CTRL, 0x0000);
6237 bnx2x_cl45_write(bp, port,
6238 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6239 ext_phy_addr,
6240 MDIO_PMA_DEVAD,
6241 MDIO_PMA_REG_CTRL, 1);
6242 break;
6243 }
6244 default:
6245 /* HW reset */
6246 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6247 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6248 port);
6249 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6250 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6251 port);
6252 DP(NETIF_MSG_LINK, "reset external PHY\n");
6253 }
6254 }
6255 /* reset the SerDes/XGXS */
6256 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
6257 (0x1ff << (port*16)));
6258
6259 /* reset BigMac */
6260 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6261 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
6262
6263 /* disable nig ingress interface */
6264 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
6265 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
6266 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
6267 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
6268 vars->link_up = 0;
6269 return 0;
6270}
6271
6272static u8 bnx2x_update_link_down(struct link_params *params,
6273 struct link_vars *vars)
6274{
6275 struct bnx2x *bp = params->bp;
6276 u8 port = params->port;
6277
6278 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
6279 bnx2x_set_led(params, LED_MODE_OFF, 0);
6280
6281 /* indicate no mac active */
6282 vars->mac_type = MAC_TYPE_NONE;
6283
6284 /* update shared memory */
6285 vars->link_status = 0;
6286 vars->line_speed = 0;
6287 bnx2x_update_mng(params, vars->link_status);
6288
6289 /* activate nig drain */
6290 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
6291
6292 /* disable emac */
6293 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6294
6295 msleep(10);
6296
6297 /* reset BigMac */
6298 bnx2x_bmac_rx_disable(bp, params->port);
6299 REG_WR(bp, GRCBASE_MISC +
6300 MISC_REGISTERS_RESET_REG_2_CLEAR,
6301 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
6302 return 0;
6303}
6304
6305static u8 bnx2x_update_link_up(struct link_params *params,
6306 struct link_vars *vars,
6307 u8 link_10g, u32 gp_status)
6308{
6309 struct bnx2x *bp = params->bp;
6310 u8 port = params->port;
6311 u8 rc = 0;
6312
6313 vars->link_status |= LINK_STATUS_LINK_UP;
6314 if (link_10g) {
6315 bnx2x_bmac_enable(params, vars, 0);
6316 bnx2x_set_led(params, LED_MODE_OPER, SPEED_10000);
6317 } else {
6318 rc = bnx2x_emac_program(params, vars->line_speed,
6319 vars->duplex);
6320
6321 bnx2x_emac_enable(params, vars, 0);
6322
6323 /* AN complete? */
6324 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
6325 if (!(vars->phy_flags &
6326 PHY_SGMII_FLAG))
6327 bnx2x_set_gmii_tx_driver(params);
6328 }
6329 }
6330
6331 /* PBF - link up */
6332 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
6333 vars->line_speed);
6334
6335 /* disable drain */
6336 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
6337
6338 /* update shared memory */
6339 bnx2x_update_mng(params, vars->link_status);
6340 msleep(20);
6341 return rc;
6342}
6343/* This function should be called upon link interrupt */
6344/* In case vars->link_up, driver needs to
6345 1. Update the pbf
6346 2. Disable drain
6347 3. Update the shared memory
6348 4. Indicate link up
6349 5. Set LEDs
6350 Otherwise,
6351 1. Update shared memory
6352 2. Reset BigMac
6353 3. Report link down
6354 4. Unset LEDs
6355*/
6356u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6357{
6358 struct bnx2x *bp = params->bp;
6359 u8 port = params->port;
6360 u16 gp_status;
6361 u8 link_10g;
6362 u8 ext_phy_link_up, rc = 0;
6363 u32 ext_phy_type;
6364 u8 is_mi_int = 0;
6365
6366 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
6367 port, (vars->phy_flags & PHY_XGXS_FLAG),
6368 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
6369
6370 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
6371 port*0x18) > 0);
6372 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
6373 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
6374 is_mi_int,
6375 REG_RD(bp,
6376 NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
6377
6378 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
6379 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
6380 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
6381
6382 /* disable emac */
6383 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6384
6385 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
6386
6387 /* Check external link change only for non-direct */
6388 ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars, is_mi_int);
6389
6390 /* Read gp_status */
6391 CL45_RD_OVER_CL22(bp, port, params->phy_addr,
6392 MDIO_REG_BANK_GP_STATUS,
6393 MDIO_GP_STATUS_TOP_AN_STATUS1,
6394 &gp_status);
6395
6396 rc = bnx2x_link_settings_status(params, vars, gp_status,
6397 ext_phy_link_up);
6398 if (rc != 0)
6399 return rc;
6400
6401 /* anything 10 and over uses the bmac */
6402 link_10g = ((vars->line_speed == SPEED_10000) ||
6403 (vars->line_speed == SPEED_12000) ||
6404 (vars->line_speed == SPEED_12500) ||
6405 (vars->line_speed == SPEED_13000) ||
6406 (vars->line_speed == SPEED_15000) ||
6407 (vars->line_speed == SPEED_16000));
6408
6409 bnx2x_link_int_ack(params, vars, link_10g, is_mi_int);
6410
6411	/* In case the external phy link is up and the internal link is down
6412	(not initialized yet, probably right after link initialization), it
6413	needs to be initialized.
6414	Note that after a link down-up as a result of a cable plug,
6415	the xgxs link would probably come up again without the need to
6416	initialize it */
6417
6418 if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
6419 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
6420 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) &&
6421 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) &&
6422 (ext_phy_link_up && !vars->phy_link_up))
6423 bnx2x_init_internal_phy(params, vars, 0);
6424
6425 /* link is up only if both local phy and external phy are up */
6426 vars->link_up = (ext_phy_link_up && vars->phy_link_up);
6427
6428 if (vars->link_up)
6429 rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
6430 else
6431 rc = bnx2x_update_link_down(params, vars);
6432
6433 return rc;
6434}
6435
6436static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6437{
6438 u8 ext_phy_addr[PORT_MAX];
6439 u16 val;
6440 s8 port;
6441
6442 /* PART1 - Reset both phys */
6443 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6444 /* Extract the ext phy address for the port */
6445 u32 ext_phy_config = REG_RD(bp, shmem_base +
6446 offsetof(struct shmem_region,
6447 dev_info.port_hw_config[port].external_phy_config));
6448
6449 /* disable attentions */
6450 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
6451 (NIG_MASK_XGXS0_LINK_STATUS |
6452 NIG_MASK_XGXS0_LINK10G |
6453 NIG_MASK_SERDES0_LINK_STATUS |
6454 NIG_MASK_MI_INT));
6455
6456 ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
6457
6458		/* Need to take the phy out of low power mode in order
6459		to write to its registers */
6460 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6461 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
6462
6463 /* Reset the phy */
6464 bnx2x_cl45_write(bp, port,
6465 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6466 ext_phy_addr[port],
6467 MDIO_PMA_DEVAD,
6468 MDIO_PMA_REG_CTRL,
6469 1<<15);
6470 }
6471
6472 /* Add delay of 150ms after reset */
6473 msleep(150);
6474
6475 /* PART2 - Download firmware to both phys */
6476 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6477 u16 fw_ver1;
6478
6479 bnx2x_bcm8073_external_rom_boot(bp, port,
6480 ext_phy_addr[port], shmem_base);
6481
6482 bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6483 ext_phy_addr[port],
6484 MDIO_PMA_DEVAD,
6485 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
6486 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
6487 DP(NETIF_MSG_LINK,
6488 "bnx2x_8073_common_init_phy port %x:"
6489 "Download failed. fw version = 0x%x\n",
6490 port, fw_ver1);
6491 return -EINVAL;
6492 }
6493
6494 /* Only set bit 10 = 1 (Tx power down) */
6495 bnx2x_cl45_read(bp, port,
6496 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6497 ext_phy_addr[port],
6498 MDIO_PMA_DEVAD,
6499 MDIO_PMA_REG_TX_POWER_DOWN, &val);
6500
6501 /* Phase1 of TX_POWER_DOWN reset */
6502 bnx2x_cl45_write(bp, port,
6503 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6504 ext_phy_addr[port],
6505 MDIO_PMA_DEVAD,
6506 MDIO_PMA_REG_TX_POWER_DOWN,
6507 (val | 1<<10));
6508 }
6509
6510 /* Toggle Transmitter: Power down and then up with 600ms
6511 delay between */
6512 msleep(600);
6513
6514 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
6515 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6516 /* Phase2 of POWER_DOWN_RESET */
6517 /* Release bit 10 (Release Tx power down) */
6518 bnx2x_cl45_read(bp, port,
6519 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6520 ext_phy_addr[port],
6521 MDIO_PMA_DEVAD,
6522 MDIO_PMA_REG_TX_POWER_DOWN, &val);
6523
6524 bnx2x_cl45_write(bp, port,
6525 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6526 ext_phy_addr[port],
6527 MDIO_PMA_DEVAD,
6528 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
6529 msleep(15);
6530
6531 /* Read modify write the SPI-ROM version select register */
6532 bnx2x_cl45_read(bp, port,
6533 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6534 ext_phy_addr[port],
6535 MDIO_PMA_DEVAD,
6536 MDIO_PMA_REG_EDC_FFE_MAIN, &val);
6537 bnx2x_cl45_write(bp, port,
6538 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6539 ext_phy_addr[port],
6540 MDIO_PMA_DEVAD,
6541 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
6542
6543 /* set GPIO2 back to LOW */
6544 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6545 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6546 }
6547 return 0;
6548
6549}
6550
6551static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6552{
6553 u8 ext_phy_addr[PORT_MAX];
6554 s8 port, first_port, i;
6555 u32 swap_val, swap_override;
6556 DP(NETIF_MSG_LINK, "Executing BCM8727 common init\n");
6557 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6558 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6559
6560 bnx2x_ext_phy_hw_reset(bp, 1 ^ (swap_val && swap_override));
6561 msleep(5);
6562
6563 if (swap_val && swap_override)
6564 first_port = PORT_0;
6565 else
6566 first_port = PORT_1;
6567
6568 /* PART1 - Reset both phys */
6569 for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) {
6570 /* Extract the ext phy address for the port */
6571 u32 ext_phy_config = REG_RD(bp, shmem_base +
6572 offsetof(struct shmem_region,
6573 dev_info.port_hw_config[port].external_phy_config));
6574
6575 /* disable attentions */
6576 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
6577 (NIG_MASK_XGXS0_LINK_STATUS |
6578 NIG_MASK_XGXS0_LINK10G |
6579 NIG_MASK_SERDES0_LINK_STATUS |
6580 NIG_MASK_MI_INT));
6581
6582 ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
6583
6584 /* Reset the phy */
6585 bnx2x_cl45_write(bp, port,
6586 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
6587 ext_phy_addr[port],
6588 MDIO_PMA_DEVAD,
6589 MDIO_PMA_REG_CTRL,
6590 1<<15);
6591 }
6592
6593 /* Add delay of 150ms after reset */
6594 msleep(150);
6595
6596 /* PART2 - Download firmware to both phys */
6597 for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) {
6598 u16 fw_ver1;
6599
6600 bnx2x_bcm8727_external_rom_boot(bp, port,
6601 ext_phy_addr[port], shmem_base);
6602
6603 bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
6604 ext_phy_addr[port],
6605 MDIO_PMA_DEVAD,
6606 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
6607 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
6608 DP(NETIF_MSG_LINK,
6609 "bnx2x_8727_common_init_phy port %x:"
6610 "Download failed. fw version = 0x%x\n",
6611 port, fw_ver1);
6612 return -EINVAL;
6613 }
6614 }
6615
6616 return 0;
6617}
6618
6619
6620static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6621{
6622 u8 ext_phy_addr;
6623 u32 val;
6624 s8 port;
6625
6626 /* Use port1 because of the static port-swap */
6627 /* Enable the module detection interrupt */
6628 val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
6629 val |= ((1<<MISC_REGISTERS_GPIO_3)|
6630 (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
6631 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
6632
6633 bnx2x_ext_phy_hw_reset(bp, 1);
6634 msleep(5);
6635 for (port = 0; port < PORT_MAX; port++) {
6636 /* Extract the ext phy address for the port */
6637 u32 ext_phy_config = REG_RD(bp, shmem_base +
6638 offsetof(struct shmem_region,
6639 dev_info.port_hw_config[port].external_phy_config));
6640
6641 ext_phy_addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
6642 DP(NETIF_MSG_LINK, "8726_common_init : ext_phy_addr = 0x%x\n",
6643 ext_phy_addr);
6644
6645 bnx2x_8726_reset_phy(bp, port, ext_phy_addr);
6646
6647 /* Set fault module detected LED on */
6648 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
6649 MISC_REGISTERS_GPIO_HIGH,
6650 port);
6651 }
6652
6653 return 0;
6654}
6655
6656
6657static u8 bnx2x_84823_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6658{
6659 /* HW reset */
6660 bnx2x_ext_phy_hw_reset(bp, 1);
6661 return 0;
6662}
6663u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6664{
6665 u8 rc = 0;
6666 u32 ext_phy_type;
6667
6668 DP(NETIF_MSG_LINK, "Begin common phy init\n");
6669
6670 /* Read the ext_phy_type for arbitrary port(0) */
6671 ext_phy_type = XGXS_EXT_PHY_TYPE(
6672 REG_RD(bp, shmem_base +
6673 offsetof(struct shmem_region,
6674 dev_info.port_hw_config[0].external_phy_config)));
6675
6676 switch (ext_phy_type) {
6677 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6678 {
6679 rc = bnx2x_8073_common_init_phy(bp, shmem_base);
6680 break;
6681 }
6682
6683 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6684 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
6685 rc = bnx2x_8727_common_init_phy(bp, shmem_base);
6686 break;
6687
6688 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6689		/* GPIO1 affects both ports, so it cannot be pulled
6690		for a single port alone */
6691 rc = bnx2x_8726_common_init_phy(bp, shmem_base);
6692 break;
6693 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6694 rc = bnx2x_84823_common_init_phy(bp, shmem_base);
6695 break;
6696 default:
6697 DP(NETIF_MSG_LINK,
6698 "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
6699 ext_phy_type);
6700 break;
6701 }
6702
6703 return rc;
6704}
6705
6706void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
6707{
6708 u16 val, cnt;
6709
6710 bnx2x_cl45_read(bp, port,
6711 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
6712 phy_addr,
6713 MDIO_PMA_DEVAD,
6714 MDIO_PMA_REG_7101_RESET, &val);
6715
6716 for (cnt = 0; cnt < 10; cnt++) {
6717 msleep(50);
6718 /* Writes a self-clearing reset */
6719 bnx2x_cl45_write(bp, port,
6720 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
6721 phy_addr,
6722 MDIO_PMA_DEVAD,
6723 MDIO_PMA_REG_7101_RESET,
6724 (val | (1<<15)));
6725 /* Wait for clear */
6726 bnx2x_cl45_read(bp, port,
6727 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
6728 phy_addr,
6729 MDIO_PMA_DEVAD,
6730 MDIO_PMA_REG_7101_RESET, &val);
6731
6732 if ((val & (1<<15)) == 0)
6733 break;
6734 }
6735}
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
new file mode 100644
index 00000000000..40c2981de8e
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -0,0 +1,206 @@
1/* Copyright 2008-2009 Broadcom Corporation
2 *
3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you
5 * under the terms of the GNU General Public License version 2, available
6 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
7 *
8 * Notwithstanding the above, under no circumstances may you combine this
9 * software in any way with any other Broadcom software provided under a
10 * license other than the GPL, without Broadcom's express prior written
11 * consent.
12 *
13 * Written by Yaniv Rosner
14 *
15 */
16
17#ifndef BNX2X_LINK_H
18#define BNX2X_LINK_H
19
20
21
22/***********************************************************/
23/* Defines */
24/***********************************************************/
25#define DEFAULT_PHY_DEV_ADDR 3
26
27
28
29#define BNX2X_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO
30#define BNX2X_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX
31#define BNX2X_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX
32#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
33#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
34
35#define SPEED_AUTO_NEG 0
36#define SPEED_12000 12000
37#define SPEED_12500 12500
38#define SPEED_13000 13000
39#define SPEED_15000 15000
40#define SPEED_16000 16000
41
42#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
43#define SFP_EEPROM_VENDOR_NAME_SIZE 16
44#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
45#define SFP_EEPROM_VENDOR_OUI_SIZE 3
46#define SFP_EEPROM_PART_NO_ADDR 0x28
47#define SFP_EEPROM_PART_NO_SIZE 16
48#define PWR_FLT_ERR_MSG_LEN 250
49/***********************************************************/
50/* Structs */
51/***********************************************************/
52/* Input parameters to the CLC */
53struct link_params {
54
55 u8 port;
56
57 /* Default / User Configuration */
58 u8 loopback_mode;
59#define LOOPBACK_NONE 0
60#define LOOPBACK_EMAC 1
61#define LOOPBACK_BMAC 2
62#define LOOPBACK_XGXS_10 3
63#define LOOPBACK_EXT_PHY 4
64#define LOOPBACK_EXT 5
65
66 u16 req_duplex;
67 u16 req_flow_ctrl;
68 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
69 req_flow_ctrl is set to AUTO */
70 u16 req_line_speed; /* Also determine AutoNeg */
71
72 /* Device parameters */
73 u8 mac_addr[6];
74
75 /* shmem parameters */
76 u32 shmem_base;
77 u32 speed_cap_mask;
78 u32 switch_cfg;
79#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
80#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
81#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
82
83 u16 hw_led_mode; /* part of the hw_config read from the shmem */
84
85 /* phy_addr populated by the phy_init function */
86 u8 phy_addr;
87 /*u8 reserved1;*/
88
89 u32 lane_config;
90 u32 ext_phy_config;
91#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
92 ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
93#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
94 (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
95 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
96#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
97 ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
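/* Example of how these macros are used throughout bnx2x_link.c:
 *
 *	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
 *	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
 */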
98
99 /* Phy register parameter */
100 u32 chip_id;
101
102 u16 xgxs_config_rx[4]; /* preemphasis values for the rx side */
103 u16 xgxs_config_tx[4]; /* preemphasis values for the tx side */
104
105 u32 feature_config_flags;
106#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
107#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
108#define FEATURE_CONFIG_BCM8727_NOC (1<<3)
109
110 /* Device pointer passed to all callback functions */
111 struct bnx2x *bp;
112};
113
114/* Output parameters */
115struct link_vars {
116 u8 phy_flags;
117
118 u8 mac_type;
119#define MAC_TYPE_NONE 0
120#define MAC_TYPE_EMAC 1
121#define MAC_TYPE_BMAC 2
122
123 u8 phy_link_up; /* internal phy link indication */
124 u8 link_up;
125
126 u16 line_speed;
127 u16 duplex;
128
129 u16 flow_ctrl;
130 u16 ieee_fc;
131
132 u32 autoneg;
133#define AUTO_NEG_DISABLED 0x0
134#define AUTO_NEG_ENABLED 0x1
135#define AUTO_NEG_COMPLETE 0x2
136#define AUTO_NEG_PARALLEL_DETECTION_USED 0x3
137
138 /* The same definitions as the shmem parameter */
139 u32 link_status;
140};
141
142/***********************************************************/
143/* Functions */
144/***********************************************************/
145
146/* Initialize the phy */
147u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output);
148
149/* Reset the link. Should be called when driver or interface goes down
150 Before calling phy firmware upgrade, the reset_ext_phy should be set
151 to 0 */
152u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
153 u8 reset_ext_phy);
154
155/* bnx2x_link_update should be called upon link interrupt */
156u8 bnx2x_link_update(struct link_params *input, struct link_vars *output);
157
158/* Use the following cl45 functions to read/write from the external phy.
159 In order to use them to read/write internal phy registers, use
160 DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
161 the register.
162 Use ext_phy_type of 0 in case of cl22 over cl45 */
163u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
164 u8 phy_addr, u8 devad, u16 reg, u16 *ret_val);
165
166u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
167 u8 phy_addr, u8 devad, u16 reg, u16 val);
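/* Illustrative sketch (not taken verbatim from the driver): a typical
 * clause 45 access to the external phy, using the type/address extracted
 * from ext_phy_config, here setting the PMA reset bit (1<<15):
 *
 *	u16 val;
 *	bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
 *			MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &val);
 *	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
 *			MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, val | (1<<15));
 */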
168
169/* Reads the link_status from the shmem,
170 and updates the link vars accordingly */
171void bnx2x_link_status_update(struct link_params *input,
172 struct link_vars *output);
173/* returns string representing the fw_version of the external phy */
174u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
175 u8 *version, u16 len);
176
177/* Set/Unset the led
178 Basically, the CLC takes care of the led for the link, but in case one needs
179 to set/unset the led manually, set the "mode" to LED_MODE_OPER to
180 blink the led, and LED_MODE_OFF to set the led off.*/
181u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed);
182#define LED_MODE_OFF 0
183#define LED_MODE_OPER 2
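/* For example, bnx2x_link.c calls
 *	bnx2x_set_led(params, LED_MODE_OPER, vars->line_speed);
 * when the link comes up, and
 *	bnx2x_set_led(params, LED_MODE_OFF, 0);
 * when the link goes down or is reset.
 */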
184
185u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
186
187/* bnx2x_handle_module_detect_int should be called upon module detection
188 interrupt */
189void bnx2x_handle_module_detect_int(struct link_params *params);
190
191/* Get the actual link status. In case it returns 0, link is up,
192 otherwise link is down*/
193u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);
194
195/* One-time initialization for external phy after power up */
196u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);
197
198/* Reset the external PHY using GPIO */
199void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
200
201void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr);
202
203u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr,
204 u8 byte_cnt, u8 *o_buf);
205
206#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
new file mode 100644
index 00000000000..b4ec2b02a46
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -0,0 +1,8040 @@
1/* bnx2x_main.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
41#include <linux/if_vlan.h>
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
45#include <net/ip6_checksum.h>
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
48#include <linux/crc32c.h>
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
51#include <linux/io.h>
52#include <linux/stringify.h>
53
54#define BNX2X_MAIN
55#include "bnx2x.h"
56#include "bnx2x_init.h"
57#include "bnx2x_init_ops.h"
58#include "bnx2x_cmn.h"
59
60
61#include <linux/firmware.h>
62#include "bnx2x_fw_file_hdr.h"
63/* FW files */
64#define FW_FILE_VERSION \
65 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
66 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
67 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
69#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
70#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
71
72/* Time in jiffies before concluding the transmitter is hung */
73#define TX_TIMEOUT (5*HZ)
74
75static char version[] __devinitdata =
76 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78
79MODULE_AUTHOR("Eliezer Tamir");
80MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
81MODULE_LICENSE("GPL");
82MODULE_VERSION(DRV_MODULE_VERSION);
83MODULE_FIRMWARE(FW_FILE_NAME_E1);
84MODULE_FIRMWARE(FW_FILE_NAME_E1H);
85
86static int multi_mode = 1;
87module_param(multi_mode, int, 0);
88MODULE_PARM_DESC(multi_mode, " Multi queue mode "
89 "(0 Disable; 1 Enable (default))");
90
91static int num_queues;
92module_param(num_queues, int, 0);
93MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
94 " (default is as a number of CPUs)");
95
96static int disable_tpa;
97module_param(disable_tpa, int, 0);
98MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
99
100static int int_mode;
101module_param(int_mode, int, 0);
102MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
103 "(1 INT#x; 2 MSI)");
104
105static int dropless_fc;
106module_param(dropless_fc, int, 0);
107MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
108
109static int poll;
110module_param(poll, int, 0);
111MODULE_PARM_DESC(poll, " Use polling (for debug)");
112
113static int mrrs = -1;
114module_param(mrrs, int, 0);
115MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
116
117static int debug;
118module_param(debug, int, 0);
119MODULE_PARM_DESC(debug, " Default debug msglevel");
120
121static struct workqueue_struct *bnx2x_wq;
122
123enum bnx2x_board_type {
124 BCM57710 = 0,
125 BCM57711 = 1,
126 BCM57711E = 2,
127};
128
129/* indexed by board_type, above */
130static struct {
131 char *name;
132} board_info[] __devinitdata = {
133 { "Broadcom NetXtreme II BCM57710 XGb" },
134 { "Broadcom NetXtreme II BCM57711 XGb" },
135 { "Broadcom NetXtreme II BCM57711E XGb" }
136};
137
138
139static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
140 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
143 { 0 }
144};
145
146MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
147
148/****************************************************************************
149* General service functions
150****************************************************************************/
151
152/* used only at init
153 * locking is done by mcp
154 */
155void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
156{
157 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
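	/* restore the GRC address window to the (benign) vendor ID offset */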
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
160 PCICFG_VENDOR_ID_OFFSET);
161}
162
163static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
164{
165 u32 val;
166
167 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
168 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
169 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
170 PCICFG_VENDOR_ID_OFFSET);
171
172 return val;
173}
174
175const u32 dmae_reg_go_c[] = {
176 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
178 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
179 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
180};
181
182/* copy command into DMAE command memory and set DMAE command go */
183void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
184{
185 u32 cmd_offset;
186 int i;
187
188 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
189 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
190 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
191
192 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
193 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
194 }
195 REG_WR(bp, dmae_reg_go_c[idx], 1);
196}
197
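/* DMA len32 dwords from host memory at dma_addr into device address space
 * at dst_addr; if DMAE is not ready yet, fall back to indirect register
 * writes, otherwise post the command and poll the wb_comp completion word */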
198void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
199 u32 len32)
200{
201 struct dmae_command dmae;
202 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
203 int cnt = 200;
204
205 if (!bp->dmae_ready) {
206 u32 *data = bnx2x_sp(bp, wb_data[0]);
207
208 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
209 " using indirect\n", dst_addr, len32);
210 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
211 return;
212 }
213
214 memset(&dmae, 0, sizeof(struct dmae_command));
215
216 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
217 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
218 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
219#ifdef __BIG_ENDIAN
220 DMAE_CMD_ENDIANITY_B_DW_SWAP |
221#else
222 DMAE_CMD_ENDIANITY_DW_SWAP |
223#endif
224 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
225 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
226 dmae.src_addr_lo = U64_LO(dma_addr);
227 dmae.src_addr_hi = U64_HI(dma_addr);
228 dmae.dst_addr_lo = dst_addr >> 2;
229 dmae.dst_addr_hi = 0;
230 dmae.len = len32;
231 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
232 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
233 dmae.comp_val = DMAE_COMP_VAL;
234
235 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
236 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
237 "dst_addr [%x:%08x (%08x)]\n"
238 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
239 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
240 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
241 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
242 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
243 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
244 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
245
246 mutex_lock(&bp->dmae_mutex);
247
248 *wb_comp = 0;
249
250 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
251
252 udelay(5);
253
254 while (*wb_comp != DMAE_COMP_VAL) {
255 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
256
257 if (!cnt) {
258 BNX2X_ERR("DMAE timeout!\n");
259 break;
260 }
261 cnt--;
262 /* adjust delay for emulation/FPGA */
263 if (CHIP_REV_IS_SLOW(bp))
264 msleep(100);
265 else
266 udelay(5);
267 }
268
269 mutex_unlock(&bp->dmae_mutex);
270}
271
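/* DMA len32 dwords from device address src_addr into the slowpath wb_data
 * buffer; mirrors bnx2x_write_dmae, using indirect register reads while
 * DMAE is not ready */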
272void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
273{
274 struct dmae_command dmae;
275 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
276 int cnt = 200;
277
278 if (!bp->dmae_ready) {
279 u32 *data = bnx2x_sp(bp, wb_data[0]);
280 int i;
281
282 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
283 " using indirect\n", src_addr, len32);
284 for (i = 0; i < len32; i++)
285 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
286 return;
287 }
288
289 memset(&dmae, 0, sizeof(struct dmae_command));
290
291 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
292 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
293 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
294#ifdef __BIG_ENDIAN
295 DMAE_CMD_ENDIANITY_B_DW_SWAP |
296#else
297 DMAE_CMD_ENDIANITY_DW_SWAP |
298#endif
299 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
300 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
301 dmae.src_addr_lo = src_addr >> 2;
302 dmae.src_addr_hi = 0;
303 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
304 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
305 dmae.len = len32;
306 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
307 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
308 dmae.comp_val = DMAE_COMP_VAL;
309
310 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
311 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
312 "dst_addr [%x:%08x (%08x)]\n"
313 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
314 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
315 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
316 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
317
318 mutex_lock(&bp->dmae_mutex);
319
320 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
321 *wb_comp = 0;
322
323 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
324
325 udelay(5);
326
327 while (*wb_comp != DMAE_COMP_VAL) {
328
329 if (!cnt) {
330 BNX2X_ERR("DMAE timeout!\n");
331 break;
332 }
333 cnt--;
334 /* adjust delay for emulation/FPGA */
335 if (CHIP_REV_IS_SLOW(bp))
336 msleep(100);
337 else
338 udelay(5);
339 }
340 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
341 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
342 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
343
344 mutex_unlock(&bp->dmae_mutex);
345}
346
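/* write a buffer of arbitrary length by splitting it into chunks of at
 * most DMAE_LEN32_WR_MAX(bp) dwords */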
347void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
348 u32 addr, u32 len)
349{
350 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
351 int offset = 0;
352
353 while (len > dmae_wr_max) {
354 bnx2x_write_dmae(bp, phys_addr + offset,
355 addr + offset, dmae_wr_max);
356 offset += dmae_wr_max * 4;
357 len -= dmae_wr_max;
358 }
359
360 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
361}
362
363/* used only for slowpath so not inlined */
364static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
365{
366 u32 wb_write[2];
367
368 wb_write[0] = val_hi;
369 wb_write[1] = val_lo;
370 REG_WR_DMAE(bp, reg, wb_write, 2);
371}
372
373#ifdef USE_WB_RD
374static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
375{
376 u32 wb_data[2];
377
378 REG_RD_DMAE(bp, reg, wb_data, 2);
379
380 return HILO_U64(wb_data[0], wb_data[1]);
381}
382#endif
383
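/* scan the XSTORM/TSTORM/CSTORM/USTORM assert lists in internal memory,
 * print every valid entry and return the number of asserts found */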
384static int bnx2x_mc_assert(struct bnx2x *bp)
385{
386 char last_idx;
387 int i, rc = 0;
388 u32 row0, row1, row2, row3;
389
390 /* XSTORM */
391 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
392 XSTORM_ASSERT_LIST_INDEX_OFFSET);
393 if (last_idx)
394 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
395
396 /* print the asserts */
397 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
398
399 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
400 XSTORM_ASSERT_LIST_OFFSET(i));
401 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
402 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
403 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
405 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
407
408 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
409 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
410 " 0x%08x 0x%08x 0x%08x\n",
411 i, row3, row2, row1, row0);
412 rc++;
413 } else {
414 break;
415 }
416 }
417
418 /* TSTORM */
419 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
420 TSTORM_ASSERT_LIST_INDEX_OFFSET);
421 if (last_idx)
422 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
423
424 /* print the asserts */
425 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
426
427 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
428 TSTORM_ASSERT_LIST_OFFSET(i));
429 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
430 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
431 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
433 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
435
436 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
437 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
438 " 0x%08x 0x%08x 0x%08x\n",
439 i, row3, row2, row1, row0);
440 rc++;
441 } else {
442 break;
443 }
444 }
445
446 /* CSTORM */
447 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
448 CSTORM_ASSERT_LIST_INDEX_OFFSET);
449 if (last_idx)
450 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
451
452 /* print the asserts */
453 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
454
455 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
456 CSTORM_ASSERT_LIST_OFFSET(i));
457 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
458 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
459 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
461 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
463
464 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
465 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
466 " 0x%08x 0x%08x 0x%08x\n",
467 i, row3, row2, row1, row0);
468 rc++;
469 } else {
470 break;
471 }
472 }
473
474 /* USTORM */
475 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
476 USTORM_ASSERT_LIST_INDEX_OFFSET);
477 if (last_idx)
478 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
479
480 /* print the asserts */
481 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
482
483 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
484 USTORM_ASSERT_LIST_OFFSET(i));
485 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
486 USTORM_ASSERT_LIST_OFFSET(i) + 4);
487 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
488 USTORM_ASSERT_LIST_OFFSET(i) + 8);
489 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i) + 12);
491
492 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
493 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
494 " 0x%08x 0x%08x 0x%08x\n",
495 i, row3, row2, row1, row0);
496 rc++;
497 } else {
498 break;
499 }
500 }
501
502 return rc;
503}
504
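/* dump the MCP scratchpad print buffer: read the current write mark from
 * just below the shmem base, dump from the mark up to shmem_base and then
 * wrap around from the start of the buffer back to the mark */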
505static void bnx2x_fw_dump(struct bnx2x *bp)
506{
507 u32 addr;
508 u32 mark, offset;
509 __be32 data[9];
510 int word;
511
512 if (BP_NOMCP(bp)) {
513 BNX2X_ERR("NO MCP - can not dump\n");
514 return;
515 }
516
517 addr = bp->common.shmem_base - 0x0800 + 4;
518 mark = REG_RD(bp, addr);
519 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
520 pr_err("begin fw dump (mark 0x%x)\n", mark);
521
522 pr_err("");
523 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
524 for (word = 0; word < 8; word++)
525 data[word] = htonl(REG_RD(bp, offset + 4*word));
526 data[8] = 0x0;
527 pr_cont("%s", (char *)data);
528 }
529 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
530 for (word = 0; word < 8; word++)
531 data[word] = htonl(REG_RD(bp, offset + 4*word));
532 data[8] = 0x0;
533 pr_cont("%s", (char *)data);
534 }
535 pr_err("end of fw dump\n");
536}
537
538void bnx2x_panic_dump(struct bnx2x *bp)
539{
540 int i;
541 u16 j, start, end;
542
543 bp->stats_state = STATS_STATE_DISABLED;
544 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
545
546 BNX2X_ERR("begin crash dump -----------------\n");
547
548 /* Indices */
549 /* Common */
550 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
551 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
552 " spq_prod_idx(0x%x)\n",
553 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
554 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
555
556 /* Rx */
557 for_each_queue(bp, i) {
558 struct bnx2x_fastpath *fp = &bp->fp[i];
559
560 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
561 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
562 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
563 i, fp->rx_bd_prod, fp->rx_bd_cons,
564 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
565 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
566 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
567 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
568 fp->rx_sge_prod, fp->last_max_sge,
569 le16_to_cpu(fp->fp_u_idx),
570 fp->status_blk->u_status_block.status_block_index);
571 }
572
573 /* Tx */
574 for_each_queue(bp, i) {
575 struct bnx2x_fastpath *fp = &bp->fp[i];
576
577 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
578 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
579 " *tx_cons_sb(0x%x)\n",
580 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
581 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
582 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
583 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
584 fp->status_blk->c_status_block.status_block_index,
585 fp->tx_db.data.prod);
586 }
587
588 /* Rings */
589 /* Rx */
590 for_each_queue(bp, i) {
591 struct bnx2x_fastpath *fp = &bp->fp[i];
592
593 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
594 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
595 for (j = start; j != end; j = RX_BD(j + 1)) {
596 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
597 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
598
599 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
600 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
601 }
602
603 start = RX_SGE(fp->rx_sge_prod);
604 end = RX_SGE(fp->last_max_sge);
605 for (j = start; j != end; j = RX_SGE(j + 1)) {
606 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
607 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
608
609 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
610 i, j, rx_sge[1], rx_sge[0], sw_page->page);
611 }
612
613 start = RCQ_BD(fp->rx_comp_cons - 10);
614 end = RCQ_BD(fp->rx_comp_cons + 503);
615 for (j = start; j != end; j = RCQ_BD(j + 1)) {
616 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
617
618 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
619 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
620 }
621 }
622
623 /* Tx */
624 for_each_queue(bp, i) {
625 struct bnx2x_fastpath *fp = &bp->fp[i];
626
627 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
628 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
629 for (j = start; j != end; j = TX_BD(j + 1)) {
630 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
631
632 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
633 i, j, sw_bd->skb, sw_bd->first_bd);
634 }
635
636 start = TX_BD(fp->tx_bd_cons - 10);
637 end = TX_BD(fp->tx_bd_cons + 254);
638 for (j = start; j != end; j = TX_BD(j + 1)) {
639 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
640
641 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
642 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
643 }
644 }
645
646 bnx2x_fw_dump(bp);
647 bnx2x_mc_assert(bp);
648 BNX2X_ERR("end crash dump -----------------\n");
649}
650
651void bnx2x_int_enable(struct bnx2x *bp)
652{
653 int port = BP_PORT(bp);
654 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
655 u32 val = REG_RD(bp, addr);
656 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
657 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
658
659 if (msix) {
660 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
661 HC_CONFIG_0_REG_INT_LINE_EN_0);
662 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
664 } else if (msi) {
665 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
666 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
668 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
669 } else {
670 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
671 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
672 HC_CONFIG_0_REG_INT_LINE_EN_0 |
673 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
674
675 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
676 val, port, addr);
677
678 REG_WR(bp, addr, val);
679
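		/* INTx: the config was just written with MSI/MSI-X enabled;
		 * clear that bit so the final write below leaves only the
		 * INT_LINE path (plus single ISR and attention) enabled */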
680 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
681 }
682
683 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
684 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
685
686 REG_WR(bp, addr, val);
687 /*
688 * Ensure that HC_CONFIG is written before leading/trailing edge config
689 */
690 mmiowb();
691 barrier();
692
693 if (CHIP_IS_E1H(bp)) {
694 /* init leading/trailing edge */
695 if (IS_E1HMF(bp)) {
696 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
697 if (bp->port.pmf)
698 /* enable nig and gpio3 attention */
699 val |= 0x1100;
700 } else
701 val = 0xffff;
702
703 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
704 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
705 }
706
707 /* Make sure that interrupts are indeed enabled from here on */
708 mmiowb();
709}
710
711static void bnx2x_int_disable(struct bnx2x *bp)
712{
713 int port = BP_PORT(bp);
714 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
715 u32 val = REG_RD(bp, addr);
716
717 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
718 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
719 HC_CONFIG_0_REG_INT_LINE_EN_0 |
720 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
721
722 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
723 val, port, addr);
724
725 /* flush all outstanding writes */
726 mmiowb();
727
728 REG_WR(bp, addr, val);
729 if (REG_RD(bp, addr) != val)
730 BNX2X_ERR("BUG! proper val not read from IGU!\n");
731}
732
733void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
734{
735 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
736 int i, offset;
737
738 /* disable interrupt handling */
739 atomic_inc(&bp->intr_sem);
740 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
741
742 if (disable_hw)
743 /* prevent the HW from sending interrupts */
744 bnx2x_int_disable(bp);
745
746 /* make sure all ISRs are done */
747 if (msix) {
748 synchronize_irq(bp->msix_table[0].vector);
749 offset = 1;
750#ifdef BCM_CNIC
751 offset++;
752#endif
753 for_each_queue(bp, i)
754 synchronize_irq(bp->msix_table[i + offset].vector);
755 } else
756 synchronize_irq(bp->pdev->irq);
757
758 /* make sure sp_task is not running */
759 cancel_delayed_work(&bp->sp_task);
760 flush_workqueue(bnx2x_wq);
761}
762
763/* fast path */
764
765/*
766 * General service functions
767 */
768
769/* Returns true if the lock was successfully acquired */
770static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
771{
772 u32 lock_status;
773 u32 resource_bit = (1 << resource);
774 int func = BP_FUNC(bp);
775 u32 hw_lock_control_reg;
776
777 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
778
779 /* Validating that the resource is within range */
780 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
781 DP(NETIF_MSG_HW,
782 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
783 resource, HW_LOCK_MAX_RESOURCE_VALUE);
784 return false;
785 }
786
787 if (func <= 5)
788 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
789 else
790 hw_lock_control_reg =
791 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
792
793 /* Try to acquire the lock */
794 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
795 lock_status = REG_RD(bp, hw_lock_control_reg);
796 if (lock_status & resource_bit)
797 return true;
798
799 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
800 return false;
801}
802
803
804#ifdef BCM_CNIC
805static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
806#endif
807
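/* slow-path event handler: ramrod completions arrive as CQEs on the
 * fastpath ring; advance the per-fastpath or global driver state machine
 * according to the command and the current state */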
808void bnx2x_sp_event(struct bnx2x_fastpath *fp,
809 union eth_rx_cqe *rr_cqe)
810{
811 struct bnx2x *bp = fp->bp;
812 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
813 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
814
815 DP(BNX2X_MSG_SP,
816 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
817 fp->index, cid, command, bp->state,
818 rr_cqe->ramrod_cqe.ramrod_type);
819
820 bp->spq_left++;
821
822 if (fp->index) {
823 switch (command | fp->state) {
824 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
825 BNX2X_FP_STATE_OPENING):
826 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
827 cid);
828 fp->state = BNX2X_FP_STATE_OPEN;
829 break;
830
831 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
832 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
833 cid);
834 fp->state = BNX2X_FP_STATE_HALTED;
835 break;
836
837 default:
838 BNX2X_ERR("unexpected MC reply (%d) "
839 "fp[%d] state is %x\n",
840 command, fp->index, fp->state);
841 break;
842 }
843 mb(); /* force bnx2x_wait_ramrod() to see the change */
844 return;
845 }
846
847 switch (command | bp->state) {
848 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
849 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
850 bp->state = BNX2X_STATE_OPEN;
851 break;
852
853 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
854 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
855 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
856 fp->state = BNX2X_FP_STATE_HALTED;
857 break;
858
859 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
860 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
861 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
862 break;
863
864#ifdef BCM_CNIC
865 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
866 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
867 bnx2x_cnic_cfc_comp(bp, cid);
868 break;
869#endif
870
871 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
872 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
873 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
874 bp->set_mac_pending--;
875 smp_wmb();
876 break;
877
878 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
879 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
880 bp->set_mac_pending--;
881 smp_wmb();
882 break;
883
884 default:
885 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
886 command, bp->state);
887 break;
888 }
889 mb(); /* force bnx2x_wait_ramrod() to see the change */
890}
891
892irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
893{
894 struct bnx2x *bp = netdev_priv(dev_instance);
895 u16 status = bnx2x_ack_int(bp);
896 u16 mask;
897 int i;
898
899 /* Return here if interrupt is shared and it's not for us */
900 if (unlikely(status == 0)) {
901 DP(NETIF_MSG_INTR, "not our interrupt!\n");
902 return IRQ_NONE;
903 }
904 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
905
906 /* Return here if interrupt is disabled */
907 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
908 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
909 return IRQ_HANDLED;
910 }
911
912#ifdef BNX2X_STOP_ON_ERROR
913 if (unlikely(bp->panic))
914 return IRQ_HANDLED;
915#endif
916
917 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
918 struct bnx2x_fastpath *fp = &bp->fp[i];
919
920 mask = 0x2 << fp->sb_id;
921 if (status & mask) {
922 /* Handle Rx and Tx according to SB id */
923 prefetch(fp->rx_cons_sb);
924 prefetch(&fp->status_blk->u_status_block.
925 status_block_index);
926 prefetch(fp->tx_cons_sb);
927 prefetch(&fp->status_blk->c_status_block.
928 status_block_index);
929 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
930 status &= ~mask;
931 }
932 }
933
934#ifdef BCM_CNIC
935 mask = 0x2 << CNIC_SB_ID(bp);
936 if (status & (mask | 0x1)) {
937 struct cnic_ops *c_ops = NULL;
938
939 rcu_read_lock();
940 c_ops = rcu_dereference(bp->cnic_ops);
941 if (c_ops)
942 c_ops->cnic_handler(bp->cnic_data, NULL);
943 rcu_read_unlock();
944
945 status &= ~mask;
946 }
947#endif
948
949 if (unlikely(status & 0x1)) {
950 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
951
952 status &= ~0x1;
953 if (!status)
954 return IRQ_HANDLED;
955 }
956
957 if (unlikely(status))
958 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
959 status);
960
961 return IRQ_HANDLED;
962}
963
964/* end of fast path */
965
966
967/* Link */
968
969/*
970 * General service functions
971 */
972
973int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
974{
975 u32 lock_status;
976 u32 resource_bit = (1 << resource);
977 int func = BP_FUNC(bp);
978 u32 hw_lock_control_reg;
979 int cnt;
980
981 /* Validating that the resource is within range */
982 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
983 DP(NETIF_MSG_HW,
984 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
985 resource, HW_LOCK_MAX_RESOURCE_VALUE);
986 return -EINVAL;
987 }
988
989 if (func <= 5) {
990 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
991 } else {
992 hw_lock_control_reg =
993 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
994 }
995
996 /* Validating that the resource is not already taken */
997 lock_status = REG_RD(bp, hw_lock_control_reg);
998 if (lock_status & resource_bit) {
999 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1000 lock_status, resource_bit);
1001 return -EEXIST;
1002 }
1003
1004 /* Try for 5 seconds, every 5 ms */
1005 for (cnt = 0; cnt < 1000; cnt++) {
1006 /* Try to acquire the lock */
1007 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1008 lock_status = REG_RD(bp, hw_lock_control_reg);
1009 if (lock_status & resource_bit)
1010 return 0;
1011
1012 msleep(5);
1013 }
1014 DP(NETIF_MSG_HW, "Timeout\n");
1015 return -EAGAIN;
1016}
1017
1018int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1019{
1020 u32 lock_status;
1021 u32 resource_bit = (1 << resource);
1022 int func = BP_FUNC(bp);
1023 u32 hw_lock_control_reg;
1024
1025 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1026
1027 /* Validating that the resource is within range */
1028 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1029 DP(NETIF_MSG_HW,
1030 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1031 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1032 return -EINVAL;
1033 }
1034
1035 if (func <= 5) {
1036 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1037 } else {
1038 hw_lock_control_reg =
1039 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1040 }
1041
1042 /* Validating that the resource is currently taken */
1043 lock_status = REG_RD(bp, hw_lock_control_reg);
1044 if (!(lock_status & resource_bit)) {
1045 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1046 lock_status, resource_bit);
1047 return -EFAULT;
1048 }
1049
1050 REG_WR(bp, hw_lock_control_reg, resource_bit);
1051 return 0;
1052}
1053
1054
1055int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1056{
1057 /* The GPIO should be swapped if swap register is set and active */
1058 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1059 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1060 int gpio_shift = gpio_num +
1061 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1062 u32 gpio_mask = (1 << gpio_shift);
1063 u32 gpio_reg;
1064 int value;
1065
1066 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1067 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1068 return -EINVAL;
1069 }
1070
1071 /* read GPIO value */
1072 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1073
1074 /* get the requested pin value */
1075 if ((gpio_reg & gpio_mask) == gpio_mask)
1076 value = 1;
1077 else
1078 value = 0;
1079
1080 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1081
1082 return value;
1083}
1084
1085int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1086{
1087 /* The GPIO should be swapped if swap register is set and active */
1088 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1089 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1090 int gpio_shift = gpio_num +
1091 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1092 u32 gpio_mask = (1 << gpio_shift);
1093 u32 gpio_reg;
1094
1095 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1096 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1097 return -EINVAL;
1098 }
1099
1100 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1101 /* read GPIO and mask except the float bits */
1102 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1103
1104 switch (mode) {
1105 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1106 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1107 gpio_num, gpio_shift);
1108 /* clear FLOAT and set CLR */
1109 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1110 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1111 break;
1112
1113 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1114 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1115 gpio_num, gpio_shift);
1116 /* clear FLOAT and set SET */
1117 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1118 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1119 break;
1120
1121 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1122 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1123 gpio_num, gpio_shift);
1124 /* set FLOAT */
1125 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1126 break;
1127
1128 default:
1129 break;
1130 }
1131
1132 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1133 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1134
1135 return 0;
1136}
1137
1138int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1139{
1140 /* The GPIO should be swapped if swap register is set and active */
1141 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1142 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1143 int gpio_shift = gpio_num +
1144 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1145 u32 gpio_mask = (1 << gpio_shift);
1146 u32 gpio_reg;
1147
1148 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1149 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1150 return -EINVAL;
1151 }
1152
1153 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1154 /* read GPIO int */
1155 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1156
1157 switch (mode) {
1158 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1159 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1160 "output low\n", gpio_num, gpio_shift);
1161 /* clear SET and set CLR */
1162 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1163 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1164 break;
1165
1166 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1167 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1168 "output high\n", gpio_num, gpio_shift);
1169 /* clear CLR and set SET */
1170 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1171 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1172 break;
1173
1174 default:
1175 break;
1176 }
1177
1178 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1179 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1180
1181 return 0;
1182}
1183
1184static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1185{
1186 u32 spio_mask = (1 << spio_num);
1187 u32 spio_reg;
1188
1189 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1190 (spio_num > MISC_REGISTERS_SPIO_7)) {
1191 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1192 return -EINVAL;
1193 }
1194
1195 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1196 /* read SPIO and mask except the float bits */
1197 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1198
1199 switch (mode) {
1200 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1201 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1202 /* clear FLOAT and set CLR */
1203 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1204 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1205 break;
1206
1207 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1208 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1209 /* clear FLOAT and set SET */
1210 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1211 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1212 break;
1213
1214 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1215 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1216 /* set FLOAT */
1217 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1218 break;
1219
1220 default:
1221 break;
1222 }
1223
1224 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1225 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1226
1227 return 0;
1228}
1229
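/* translate the negotiated IEEE pause settings into the ethtool
 * ADVERTISED_Pause/ADVERTISED_Asym_Pause advertising flags */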
1230void bnx2x_calc_fc_adv(struct bnx2x *bp)
1231{
1232 switch (bp->link_vars.ieee_fc &
1233 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1234 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1235 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1236 ADVERTISED_Pause);
1237 break;
1238
1239 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1240 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1241 ADVERTISED_Pause);
1242 break;
1243
1244 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1245 bp->port.advertising |= ADVERTISED_Asym_Pause;
1246 break;
1247
1248 default:
1249 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1250 ADVERTISED_Pause);
1251 break;
1252 }
1253}
1254
1255
1256u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1257{
1258 if (!BP_NOMCP(bp)) {
1259 u8 rc;
1260
1261 /* Initialize link parameters structure variables */
1262 /* It is recommended to turn off RX FC for jumbo frames
1263 for better performance */
1264 if (bp->dev->mtu > 5000)
1265 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1266 else
1267 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1268
1269 bnx2x_acquire_phy_lock(bp);
1270
1271 if (load_mode == LOAD_DIAG)
1272 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
1273
1274 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1275
1276 bnx2x_release_phy_lock(bp);
1277
1278 bnx2x_calc_fc_adv(bp);
1279
1280 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1281 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1282 bnx2x_link_report(bp);
1283 }
1284
1285 return rc;
1286 }
1287 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1288 return -EINVAL;
1289}
1290
1291void bnx2x_link_set(struct bnx2x *bp)
1292{
1293 if (!BP_NOMCP(bp)) {
1294 bnx2x_acquire_phy_lock(bp);
1295 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1296 bnx2x_release_phy_lock(bp);
1297
1298 bnx2x_calc_fc_adv(bp);
1299 } else
1300 BNX2X_ERR("Bootcode is missing - can not set link\n");
1301}
1302
1303static void bnx2x__link_reset(struct bnx2x *bp)
1304{
1305 if (!BP_NOMCP(bp)) {
1306 bnx2x_acquire_phy_lock(bp);
1307 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1308 bnx2x_release_phy_lock(bp);
1309 } else
1310 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1311}
1312
1313u8 bnx2x_link_test(struct bnx2x *bp)
1314{
1315 u8 rc = 0;
1316
1317 if (!BP_NOMCP(bp)) {
1318 bnx2x_acquire_phy_lock(bp);
1319 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1320 bnx2x_release_phy_lock(bp);
1321 } else
1322 BNX2X_ERR("Bootcode is missing - can not test link\n");
1323
1324 return rc;
1325}
1326
1327static void bnx2x_init_port_minmax(struct bnx2x *bp)
1328{
1329 u32 r_param = bp->link_vars.line_speed / 8;
1330 u32 fair_periodic_timeout_usec;
1331 u32 t_fair;
1332
1333 memset(&(bp->cmng.rs_vars), 0,
1334 sizeof(struct rate_shaping_vars_per_port));
1335 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1336
1337 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1338 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1339
1340 /* this is the threshold below which no timer arming will occur;
1341 the 1.25 coefficient makes the threshold a little bigger
1342 than the real time, to compensate for timer inaccuracy */
1343 bp->cmng.rs_vars.rs_threshold =
1344 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1345
1346 /* resolution of fairness timer */
1347 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1348 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1349 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1350
1351 /* this is the threshold below which we won't arm the timer anymore */
1352 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1353
1354 /* we multiply by 1e3/8 to get bytes/msec.
1355 We don't want the credits to exceed
1356 t_fair*FAIR_MEM (the algorithm resolution) */
1357 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1358 /* since each tick is 4 usec */
1359 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1360}
1361
1362/* Calculates the sum of vn_min_rates.
1363 It's needed for further normalizing of the min_rates.
1364 Returns:
1365 sum of vn_min_rates.
1366 or
1367 0 - if all the min_rates are 0.
1368 In the latter case the fairness algorithm should be deactivated.
1369 If not all min_rates are zero then those that are zero will be set to 1.
1370 */
1371static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1372{
1373 int all_zero = 1;
1374 int port = BP_PORT(bp);
1375 int vn;
1376
1377 bp->vn_weight_sum = 0;
1378 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1379 int func = 2*vn + port;
1380 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1381 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1382 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1383
1384 /* Skip hidden vns */
1385 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1386 continue;
1387
1388 /* If min rate is zero - set it to 1 */
1389 if (!vn_min_rate)
1390 vn_min_rate = DEF_MIN_RATE;
1391 else
1392 all_zero = 0;
1393
1394 bp->vn_weight_sum += vn_min_rate;
1395 }
1396
1397 /* ... only if all min rates are zeros - disable fairness */
1398 if (all_zero) {
1399 bp->cmng.flags.cmng_enables &=
1400 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1401 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1402 " fairness will be disabled\n");
1403 } else
1404 bp->cmng.flags.cmng_enables |=
1405 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1406}
1407
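/* program the per-VN rate shaping and fairness parameters, derived from
 * the MF configuration min/max bandwidth, into XSTORM internal memory */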
1408static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1409{
1410 struct rate_shaping_vars_per_vn m_rs_vn;
1411 struct fairness_vars_per_vn m_fair_vn;
1412 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1413 u16 vn_min_rate, vn_max_rate;
1414 int i;
1415
1416 /* If function is hidden - set min and max to zeroes */
1417 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1418 vn_min_rate = 0;
1419 vn_max_rate = 0;
1420
1421 } else {
1422 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1423 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1424 /* If min rate is zero - set it to 1 */
1425 if (!vn_min_rate)
1426 vn_min_rate = DEF_MIN_RATE;
1427 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1428 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1429 }
1430 DP(NETIF_MSG_IFUP,
1431 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1432 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1433
1434 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1435 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1436
1437 /* global vn counter - maximal Mbps for this vn */
1438 m_rs_vn.vn_counter.rate = vn_max_rate;
1439
1440 /* quota - number of bytes transmitted in this period */
1441 m_rs_vn.vn_counter.quota =
1442 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1443
1444 if (bp->vn_weight_sum) {
1445 /* credit for each period of the fairness algorithm:
1446 number of bytes in T_FAIR (the vn share the port rate).
1447 vn_weight_sum should not be larger than 10000, thus
1448 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1449 than zero */
1450 m_fair_vn.vn_credit_delta =
1451 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1452 (8 * bp->vn_weight_sum))),
1453 (bp->cmng.fair_vars.fair_threshold * 2));
1454 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1455 m_fair_vn.vn_credit_delta);
1456 }
1457
1458 /* Store it to internal memory */
1459 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1460 REG_WR(bp, BAR_XSTRORM_INTMEM +
1461 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1462 ((u32 *)(&m_rs_vn))[i]);
1463
1464 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1465 REG_WR(bp, BAR_XSTRORM_INTMEM +
1466 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1467 ((u32 *)(&m_fair_vn))[i]);
1468}
1469
1470
1471/* This function is called upon link interrupt */
1472static void bnx2x_link_attn(struct bnx2x *bp)
1473{
1474 u32 prev_link_status = bp->link_vars.link_status;
1475 /* Make sure that we are synced with the current statistics */
1476 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1477
1478 bnx2x_link_update(&bp->link_params, &bp->link_vars);
1479
1480 if (bp->link_vars.link_up) {
1481
1482 /* dropless flow control */
1483 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1484 int port = BP_PORT(bp);
1485 u32 pause_enabled = 0;
1486
1487 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1488 pause_enabled = 1;
1489
1490 REG_WR(bp, BAR_USTRORM_INTMEM +
1491 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1492 pause_enabled);
1493 }
1494
1495 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1496 struct host_port_stats *pstats;
1497
1498 pstats = bnx2x_sp(bp, port_stats);
1499 /* reset old bmac stats */
1500 memset(&(pstats->mac_stx[0]), 0,
1501 sizeof(struct mac_stx));
1502 }
1503 if (bp->state == BNX2X_STATE_OPEN)
1504 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1505 }
1506
1507 /* indicate link status only if link status actually changed */
1508 if (prev_link_status != bp->link_vars.link_status)
1509 bnx2x_link_report(bp);
1510
1511 if (IS_E1HMF(bp)) {
1512 int port = BP_PORT(bp);
1513 int func;
1514 int vn;
1515
1516 /* Set the attention towards other drivers on the same port */
1517 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1518 if (vn == BP_E1HVN(bp))
1519 continue;
1520
1521 func = ((vn << 1) | port);
1522 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1523 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1524 }
1525
1526 if (bp->link_vars.link_up) {
1527 int i;
1528
1529 /* Init rate shaping and fairness contexts */
1530 bnx2x_init_port_minmax(bp);
1531
1532 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1533 bnx2x_init_vn_minmax(bp, 2*vn + port);
1534
1535 /* Store it to internal memory */
1536 for (i = 0;
1537 i < sizeof(struct cmng_struct_per_port) / 4; i++)
1538 REG_WR(bp, BAR_XSTRORM_INTMEM +
1539 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1540 ((u32 *)(&bp->cmng))[i]);
1541 }
1542 }
1543}
1544
1545void bnx2x__link_status_update(struct bnx2x *bp)
1546{
1547 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1548 return;
1549
1550 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1551
1552 if (bp->link_vars.link_up)
1553 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1554 else
1555 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1556
1557 bnx2x_calc_vn_weight_sum(bp);
1558
1559 /* indicate link status */
1560 bnx2x_link_report(bp);
1561}
1562
1563static void bnx2x_pmf_update(struct bnx2x *bp)
1564{
1565 int port = BP_PORT(bp);
1566 u32 val;
1567
1568 bp->port.pmf = 1;
1569 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1570
1571 /* enable nig attention */
1572 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1573 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1574 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1575
1576 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1577}
1578
1579/* end of Link */
1580
1581/* slow path */
1582
1583/*
1584 * General service functions
1585 */
1586
1587/* send the MCP a request, block until there is a reply */
1588u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
1589{
1590 int func = BP_FUNC(bp);
1591 u32 seq = ++bp->fw_seq;
1592 u32 rc = 0;
1593 u32 cnt = 1;
1594 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1595
1596 mutex_lock(&bp->fw_mb_mutex);
1597 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1598 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1599
1600 do {
1601 /* let the FW do its magic ... */
1602 msleep(delay);
1603
1604 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1605
1606 /* Give the FW up to 5 seconds (500*10ms) */
1607 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
1608
1609 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1610 cnt*delay, rc, seq);
1611
1612 /* is this a reply to our command? */
1613 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1614 rc &= FW_MSG_CODE_MASK;
1615 else {
1616 /* FW BUG! */
1617 BNX2X_ERR("FW failed to respond!\n");
1618 bnx2x_fw_dump(bp);
1619 rc = 0;
1620 }
1621 mutex_unlock(&bp->fw_mb_mutex);
1622
1623 return rc;
1624}
1625
1626static void bnx2x_e1h_disable(struct bnx2x *bp)
1627{
1628 int port = BP_PORT(bp);
1629
1630 netif_tx_disable(bp->dev);
1631
1632 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1633
1634 netif_carrier_off(bp->dev);
1635}
1636
1637static void bnx2x_e1h_enable(struct bnx2x *bp)
1638{
1639 int port = BP_PORT(bp);
1640
1641 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1642
1643 /* Tx queues should only be re-enabled */
1644 netif_tx_wake_all_queues(bp->dev);
1645
1646 /*
1647 * Should not call netif_carrier_on since it will be called if the link
1648 * is up when checking for link state
1649 */
1650}
1651
1652static void bnx2x_update_min_max(struct bnx2x *bp)
1653{
1654 int port = BP_PORT(bp);
1655 int vn, i;
1656
1657 /* Init rate shaping and fairness contexts */
1658 bnx2x_init_port_minmax(bp);
1659
1660 bnx2x_calc_vn_weight_sum(bp);
1661
1662 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1663 bnx2x_init_vn_minmax(bp, 2*vn + port);
1664
1665 if (bp->port.pmf) {
1666 int func;
1667
1668 /* Set the attention towards other drivers on the same port */
1669 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1670 if (vn == BP_E1HVN(bp))
1671 continue;
1672
1673 func = ((vn << 1) | port);
1674 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1675 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1676 }
1677
1678 /* Store it to internal memory */
1679 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1680 REG_WR(bp, BAR_XSTRORM_INTMEM +
1681 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1682 ((u32 *)(&bp->cmng))[i]);
1683 }
1684}
1685
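/* handle a DCC notification from the MCP: disable/enable the PF and/or
 * refresh the min/max bandwidth configuration, then report DCC_FAILURE if
 * any event was left unhandled and DCC_OK otherwise */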
1686static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1687{
1688 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
1689
1690 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1691
1692 /*
1693 * This is the only place besides the function initialization
1694 * where the bp->flags can change so it is done without any
1695 * locks
1696 */
1697 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1698 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1699 bp->flags |= MF_FUNC_DIS;
1700
1701 bnx2x_e1h_disable(bp);
1702 } else {
1703 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
1704 bp->flags &= ~MF_FUNC_DIS;
1705
1706 bnx2x_e1h_enable(bp);
1707 }
1708 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1709 }
1710 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1711
1712 bnx2x_update_min_max(bp);
1713 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1714 }
1715
1716 /* Report results to MCP */
1717 if (dcc_event)
1718 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
1719 else
1720 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1721}
1722
1723/* must be called under the spq lock */
1724static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1725{
1726 struct eth_spe *next_spe = bp->spq_prod_bd;
1727
1728 if (bp->spq_prod_bd == bp->spq_last_bd) {
1729 bp->spq_prod_bd = bp->spq;
1730 bp->spq_prod_idx = 0;
1731 DP(NETIF_MSG_TIMER, "end of spq\n");
1732 } else {
1733 bp->spq_prod_bd++;
1734 bp->spq_prod_idx++;
1735 }
1736 return next_spe;
1737}
1738
1739/* must be called under the spq lock */
1740static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1741{
1742 int func = BP_FUNC(bp);
1743
1744 /* Make sure that BD data is updated before writing the producer */
1745 wmb();
1746
1747 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1748 bp->spq_prod_idx);
1749 mmiowb();
1750}
1751
1752/* the slow path queue is odd since completions arrive on the fastpath ring */
1753int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1754 u32 data_hi, u32 data_lo, int common)
1755{
1756 struct eth_spe *spe;
1757
1758#ifdef BNX2X_STOP_ON_ERROR
1759 if (unlikely(bp->panic))
1760 return -EIO;
1761#endif
1762
1763 spin_lock_bh(&bp->spq_lock);
1764
1765 if (!bp->spq_left) {
1766 BNX2X_ERR("BUG! SPQ ring full!\n");
1767 spin_unlock_bh(&bp->spq_lock);
1768 bnx2x_panic();
1769 return -EBUSY;
1770 }
1771
1772 spe = bnx2x_sp_get_next(bp);
1773
1774 /* CID needs the port number to be encoded in it */
1775 spe->hdr.conn_and_cmd_data =
1776 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1777 HW_CID(bp, cid));
1778 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1779 if (common)
1780 spe->hdr.type |=
1781 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1782
1783 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1784 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1785
1786 bp->spq_left--;
1787
1788 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1789 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1790 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1791 (u32)(U64_LO(bp->spq_mapping) +
1792 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1793 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1794
1795 bnx2x_sp_prod_update(bp);
1796 spin_unlock_bh(&bp->spq_lock);
1797 return 0;
1798}
1799
1800/* acquire split MCP access lock register */
1801static int bnx2x_acquire_alr(struct bnx2x *bp)
1802{
1803 u32 j, val;
1804 int rc = 0;
1805
1806 might_sleep();
1807 for (j = 0; j < 1000; j++) {
1808 val = (1UL << 31);
1809 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1810 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1811 if (val & (1L << 31))
1812 break;
1813
1814 msleep(5);
1815 }
1816 if (!(val & (1L << 31))) {
1817 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1818 rc = -EBUSY;
1819 }
1820
1821 return rc;
1822}
1823
1824/* release split MCP access lock register */
1825static void bnx2x_release_alr(struct bnx2x *bp)
1826{
1827 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
1828}
1829
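/* check which sections of the default status block the chip has updated;
 * returns a bitmask: 1 - attention bits, 2 - CSTORM, 4 - USTORM,
 * 8 - XSTORM, 16 - TSTORM */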
1830static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1831{
1832 struct host_def_status_block *def_sb = bp->def_status_blk;
1833 u16 rc = 0;
1834
1835 barrier(); /* status block is written to by the chip */
1836 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1837 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1838 rc |= 1;
1839 }
1840 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1841 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1842 rc |= 2;
1843 }
1844 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1845 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1846 rc |= 4;
1847 }
1848 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1849 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1850 rc |= 8;
1851 }
1852 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1853 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1854 rc |= 16;
1855 }
1856 return rc;
1857}
1858
1859/*
1860 * slow path service functions
1861 */
1862
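/* mask the newly asserted attention bits in the AEU, record them in
 * attn_state, handle the hard-wired ones (NIG/link, GPIOs, general
 * attentions) and acknowledge them towards the HC */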
1863static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1864{
1865 int port = BP_PORT(bp);
1866 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1867 COMMAND_REG_ATTN_BITS_SET);
1868 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1869 MISC_REG_AEU_MASK_ATTN_FUNC_0;
1870 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1871 NIG_REG_MASK_INTERRUPT_PORT0;
1872 u32 aeu_mask;
1873 u32 nig_mask = 0;
1874
1875 if (bp->attn_state & asserted)
1876 BNX2X_ERR("IGU ERROR\n");
1877
1878 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1879 aeu_mask = REG_RD(bp, aeu_addr);
1880
1881 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
1882 aeu_mask, asserted);
1883 aeu_mask &= ~(asserted & 0x3ff);
1884 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
1885
1886 REG_WR(bp, aeu_addr, aeu_mask);
1887 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1888
1889 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1890 bp->attn_state |= asserted;
1891 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1892
1893 if (asserted & ATTN_HARD_WIRED_MASK) {
1894 if (asserted & ATTN_NIG_FOR_FUNC) {
1895
1896 bnx2x_acquire_phy_lock(bp);
1897
1898 /* save nig interrupt mask */
1899 nig_mask = REG_RD(bp, nig_int_mask_addr);
1900 REG_WR(bp, nig_int_mask_addr, 0);
1901
1902 bnx2x_link_attn(bp);
1903
1904 /* handle unicore attn? */
1905 }
1906 if (asserted & ATTN_SW_TIMER_4_FUNC)
1907 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1908
1909 if (asserted & GPIO_2_FUNC)
1910 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1911
1912 if (asserted & GPIO_3_FUNC)
1913 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1914
1915 if (asserted & GPIO_4_FUNC)
1916 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1917
1918 if (port == 0) {
1919 if (asserted & ATTN_GENERAL_ATTN_1) {
1920 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1921 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1922 }
1923 if (asserted & ATTN_GENERAL_ATTN_2) {
1924 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1925 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1926 }
1927 if (asserted & ATTN_GENERAL_ATTN_3) {
1928 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1929 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1930 }
1931 } else {
1932 if (asserted & ATTN_GENERAL_ATTN_4) {
1933 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1934 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1935 }
1936 if (asserted & ATTN_GENERAL_ATTN_5) {
1937 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1938 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1939 }
1940 if (asserted & ATTN_GENERAL_ATTN_6) {
1941 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1942 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1943 }
1944 }
1945
1946 } /* if hardwired */
1947
1948 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1949 asserted, hc_addr);
1950 REG_WR(bp, hc_addr, asserted);
1951
1952 /* now set back the mask */
1953 if (asserted & ATTN_NIG_FOR_FUNC) {
1954 REG_WR(bp, nig_int_mask_addr, nig_mask);
1955 bnx2x_release_phy_lock(bp);
1956 }
1957}
1958
1959static inline void bnx2x_fan_failure(struct bnx2x *bp)
1960{
1961 int port = BP_PORT(bp);
1962
1963 /* mark the failure */
1964 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1965 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1966 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1967 bp->link_params.ext_phy_config);
1968
1969 /* log the failure */
1970 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
1971 " the driver to shutdown the card to prevent permanent"
1972 " damage. Please contact OEM Support for assistance\n");
1973}
1974
1975static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1976{
1977 int port = BP_PORT(bp);
1978 int reg_offset;
1979 u32 val, swap_val, swap_override;
1980
1981 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1982 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1983
1984 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
1985
1986 val = REG_RD(bp, reg_offset);
1987 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1988 REG_WR(bp, reg_offset, val);
1989
1990 BNX2X_ERR("SPIO5 hw attention\n");
1991
1992 /* Fan failure attention */
1993 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
1994 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
1995 /* Low power mode is controlled by GPIO 2 */
1996 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1997 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1998 /* The PHY reset is controlled by GPIO 1 */
1999 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2000 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2001 break;
2002
2003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2004 /* The PHY reset is controlled by GPIO 1 */
2005 /* fake the port number to cancel the swap done in
2006 set_gpio() */
2007 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2008 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2009 port = (swap_val && swap_override) ^ 1;
2010 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2011 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2012 break;
2013
2014 default:
2015 break;
2016 }
2017 bnx2x_fan_failure(bp);
2018 }
2019
2020 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2021 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2022 bnx2x_acquire_phy_lock(bp);
2023 bnx2x_handle_module_detect_int(&bp->link_params);
2024 bnx2x_release_phy_lock(bp);
2025 }
2026
2027 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2028
2029 val = REG_RD(bp, reg_offset);
2030 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2031 REG_WR(bp, reg_offset, val);
2032
2033 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2034 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2035 bnx2x_panic();
2036 }
2037}
2038
2039static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2040{
2041 u32 val;
2042
2043 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2044
2045 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2046 BNX2X_ERR("DB hw attention 0x%x\n", val);
2047 /* DORQ discard attention */
2048 if (val & 0x2)
2049 BNX2X_ERR("FATAL error from DORQ\n");
2050 }
2051
2052 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2053
2054 int port = BP_PORT(bp);
2055 int reg_offset;
2056
2057 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2058 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2059
2060 val = REG_RD(bp, reg_offset);
2061 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2062 REG_WR(bp, reg_offset, val);
2063
2064 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2065 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2066 bnx2x_panic();
2067 }
2068}
2069
2070static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2071{
2072 u32 val;
2073
2074 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2075
2076 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2077 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2078 /* CFC error attention */
2079 if (val & 0x2)
2080 BNX2X_ERR("FATAL error from CFC\n");
2081 }
2082
2083 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2084
2085 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2086 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2087 /* RQ_USDMDP_FIFO_OVERFLOW */
2088 if (val & 0x18000)
2089 BNX2X_ERR("FATAL error from PXP\n");
2090 }
2091
2092 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2093
2094 int port = BP_PORT(bp);
2095 int reg_offset;
2096
2097 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2098 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2099
2100 val = REG_RD(bp, reg_offset);
2101 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2102 REG_WR(bp, reg_offset, val);
2103
2104 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2105 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2106 bnx2x_panic();
2107 }
2108}
2109
2110static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2111{
2112 u32 val;
2113
2114 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2115
2116 if (attn & BNX2X_PMF_LINK_ASSERT) {
2117 int func = BP_FUNC(bp);
2118
2119 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2120 bp->mf_config = SHMEM_RD(bp,
2121 mf_cfg.func_mf_config[func].config);
2122 val = SHMEM_RD(bp, func_mb[func].drv_status);
2123 if (val & DRV_STATUS_DCC_EVENT_MASK)
2124 bnx2x_dcc_event(bp,
2125 (val & DRV_STATUS_DCC_EVENT_MASK));
2126 bnx2x__link_status_update(bp);
2127 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2128 bnx2x_pmf_update(bp);
2129
2130 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2131
2132 BNX2X_ERR("MC assert!\n");
2133 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2134 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2135 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2136 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2137 bnx2x_panic();
2138
2139 } else if (attn & BNX2X_MCP_ASSERT) {
2140
2141 BNX2X_ERR("MCP assert!\n");
2142 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2143 bnx2x_fw_dump(bp);
2144
2145 } else
2146 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2147 }
2148
2149 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2150 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2151 if (attn & BNX2X_GRC_TIMEOUT) {
2152 val = CHIP_IS_E1H(bp) ?
2153 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2154 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2155 }
2156 if (attn & BNX2X_GRC_RSV) {
2157 val = CHIP_IS_E1H(bp) ?
2158 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2159 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2160 }
2161 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2162 }
2163}
2164
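/*
 * Driver recovery state is kept in MISC_REG_GENERIC_POR_1: the low 16
 * bits form the load counter manipulated by the inc/dec helpers below,
 * and bit 16 (RESET_DONE_FLAG_SHIFT) is set while a reset/recovery is
 * in progress and cleared once it is done.
 */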
2165#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
2166#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
2167#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2168#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2169#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2170#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
2171/*
2172 * should be run under rtnl lock
2173 */
2174static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2175{
2176 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2177 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2178 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2179 barrier();
2180 mmiowb();
2181}
2182
2183/*
2184 * should be run under rtnl lock
2185 */
2186static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2187{
2188 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2189 val |= (1 << RESET_DONE_FLAG_SHIFT);
2190 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2191 barrier();
2192 mmiowb();
2193}
2194
2195/*
2196 * should be run under rtnl lock
2197 */
2198bool bnx2x_reset_is_done(struct bnx2x *bp)
2199{
2200 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2201 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2202 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2203}
2204
2205/*
2206 * should be run under rtnl lock
2207 */
2208inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2209{
2210 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2211
2212 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2213
2214 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2215 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2216 barrier();
2217 mmiowb();
2218}
2219
2220/*
2221 * should be run under rtnl lock
2222 */
2223u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2224{
2225 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2226
2227 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2228
2229 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2230 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2231 barrier();
2232 mmiowb();
2233
2234 return val1;
2235}
2236
2237/*
2238 * should be run under rtnl lock
2239 */
2240static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2241{
2242 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2243}
2244
2245static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2246{
2247 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2248 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2249}
2250
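/*
 * Parity reporting helpers: each bnx2x_print_blocks_with_parityN() walks
 * one attention signal word and appends the name of every HW block whose
 * parity bit is set to the message started by the caller.
 */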
2251static inline void _print_next_block(int idx, const char *blk)
2252{
2253 if (idx)
2254 pr_cont(", ");
2255 pr_cont("%s", blk);
2256}
2257
2258static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2259{
2260 int i = 0;
2261 u32 cur_bit = 0;
2262 for (i = 0; sig; i++) {
2263 cur_bit = ((u32)0x1 << i);
2264 if (sig & cur_bit) {
2265 switch (cur_bit) {
2266 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2267 _print_next_block(par_num++, "BRB");
2268 break;
2269 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2270 _print_next_block(par_num++, "PARSER");
2271 break;
2272 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2273 _print_next_block(par_num++, "TSDM");
2274 break;
2275 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2276 _print_next_block(par_num++, "SEARCHER");
2277 break;
2278 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2279 _print_next_block(par_num++, "TSEMI");
2280 break;
2281 }
2282
2283 /* Clear the bit */
2284 sig &= ~cur_bit;
2285 }
2286 }
2287
2288 return par_num;
2289}
2290
2291static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2292{
2293 int i = 0;
2294 u32 cur_bit = 0;
2295 for (i = 0; sig; i++) {
2296 cur_bit = ((u32)0x1 << i);
2297 if (sig & cur_bit) {
2298 switch (cur_bit) {
2299 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2300 _print_next_block(par_num++, "PBCLIENT");
2301 break;
2302 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2303 _print_next_block(par_num++, "QM");
2304 break;
2305 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2306 _print_next_block(par_num++, "XSDM");
2307 break;
2308 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2309 _print_next_block(par_num++, "XSEMI");
2310 break;
2311 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2312 _print_next_block(par_num++, "DOORBELLQ");
2313 break;
2314 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2315 _print_next_block(par_num++, "VAUX PCI CORE");
2316 break;
2317 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2318 _print_next_block(par_num++, "DEBUG");
2319 break;
2320 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2321 _print_next_block(par_num++, "USDM");
2322 break;
2323 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2324 _print_next_block(par_num++, "USEMI");
2325 break;
2326 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2327 _print_next_block(par_num++, "UPB");
2328 break;
2329 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2330 _print_next_block(par_num++, "CSDM");
2331 break;
2332 }
2333
2334 /* Clear the bit */
2335 sig &= ~cur_bit;
2336 }
2337 }
2338
2339 return par_num;
2340}
2341
2342static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2343{
2344 int i = 0;
2345 u32 cur_bit = 0;
2346 for (i = 0; sig; i++) {
2347 cur_bit = ((u32)0x1 << i);
2348 if (sig & cur_bit) {
2349 switch (cur_bit) {
2350 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2351 _print_next_block(par_num++, "CSEMI");
2352 break;
2353 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2354 _print_next_block(par_num++, "PXP");
2355 break;
2356 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2357 _print_next_block(par_num++,
2358 "PXPPCICLOCKCLIENT");
2359 break;
2360 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2361 _print_next_block(par_num++, "CFC");
2362 break;
2363 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2364 _print_next_block(par_num++, "CDU");
2365 break;
2366 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2367 _print_next_block(par_num++, "IGU");
2368 break;
2369 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2370 _print_next_block(par_num++, "MISC");
2371 break;
2372 }
2373
2374 /* Clear the bit */
2375 sig &= ~cur_bit;
2376 }
2377 }
2378
2379 return par_num;
2380}
2381
2382static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2383{
2384 int i = 0;
2385 u32 cur_bit = 0;
2386 for (i = 0; sig; i++) {
2387 cur_bit = ((u32)0x1 << i);
2388 if (sig & cur_bit) {
2389 switch (cur_bit) {
2390 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2391 _print_next_block(par_num++, "MCP ROM");
2392 break;
2393 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2394 _print_next_block(par_num++, "MCP UMP RX");
2395 break;
2396 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2397 _print_next_block(par_num++, "MCP UMP TX");
2398 break;
2399 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2400 _print_next_block(par_num++, "MCP SCPAD");
2401 break;
2402 }
2403
2404 /* Clear the bit */
2405 sig &= ~cur_bit;
2406 }
2407 }
2408
2409 return par_num;
2410}
2411
2412static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2413 u32 sig2, u32 sig3)
2414{
2415 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2416 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2417 int par_num = 0;
2418 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
2419 "[0]:0x%08x [1]:0x%08x "
2420 "[2]:0x%08x [3]:0x%08x\n",
2421 sig0 & HW_PRTY_ASSERT_SET_0,
2422 sig1 & HW_PRTY_ASSERT_SET_1,
2423 sig2 & HW_PRTY_ASSERT_SET_2,
2424 sig3 & HW_PRTY_ASSERT_SET_3);
2425 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
2426 bp->dev->name);
2427 par_num = bnx2x_print_blocks_with_parity0(
2428 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2429 par_num = bnx2x_print_blocks_with_parity1(
2430 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2431 par_num = bnx2x_print_blocks_with_parity2(
2432 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2433 par_num = bnx2x_print_blocks_with_parity3(
2434 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2435 printk("\n");
2436 return true;
2437 } else
2438 return false;
2439}
2440
2441bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2442{
2443 struct attn_route attn;
2444 int port = BP_PORT(bp);
2445
2446 attn.sig[0] = REG_RD(bp,
2447 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2448 port*4);
2449 attn.sig[1] = REG_RD(bp,
2450 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2451 port*4);
2452 attn.sig[2] = REG_RD(bp,
2453 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2454 port*4);
2455 attn.sig[3] = REG_RD(bp,
2456 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2457 port*4);
2458
2459 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2460 attn.sig[3]);
2461}
2462
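/*
 * Main deassertion handler: if a parity error is pending, start the
 * recovery flow (reset task) and skip normal attention handling so the
 * other function can observe the parity bits too; otherwise dispatch the
 * attention signals to the per-group handlers and restore the AEU mask.
 */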
2463static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2464{
2465 struct attn_route attn, *group_mask;
2466 int port = BP_PORT(bp);
2467 int index;
2468 u32 reg_addr;
2469 u32 val;
2470 u32 aeu_mask;
2471
2472 /* need to take HW lock because MCP or other port might also
2473 try to handle this event */
2474 bnx2x_acquire_alr(bp);
2475
2476 if (bnx2x_chk_parity_attn(bp)) {
2477 bp->recovery_state = BNX2X_RECOVERY_INIT;
2478 bnx2x_set_reset_in_progress(bp);
2479 schedule_delayed_work(&bp->reset_task, 0);
2480 /* Disable HW interrupts */
2481 bnx2x_int_disable(bp);
2482 bnx2x_release_alr(bp);
2483 /* In case of parity errors, don't handle attentions so that
2484 * the other function can also "see" the parity errors.
2485 */
2486 return;
2487 }
2488
2489 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2490 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2491 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2492 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2493 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2494 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2495
2496 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2497 if (deasserted & (1 << index)) {
2498 group_mask = &bp->attn_group[index];
2499
2500 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2501 index, group_mask->sig[0], group_mask->sig[1],
2502 group_mask->sig[2], group_mask->sig[3]);
2503
2504 bnx2x_attn_int_deasserted3(bp,
2505 attn.sig[3] & group_mask->sig[3]);
2506 bnx2x_attn_int_deasserted1(bp,
2507 attn.sig[1] & group_mask->sig[1]);
2508 bnx2x_attn_int_deasserted2(bp,
2509 attn.sig[2] & group_mask->sig[2]);
2510 bnx2x_attn_int_deasserted0(bp,
2511 attn.sig[0] & group_mask->sig[0]);
2512 }
2513 }
2514
2515 bnx2x_release_alr(bp);
2516
2517 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2518
2519 val = ~deasserted;
2520 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2521 val, reg_addr);
2522 REG_WR(bp, reg_addr, val);
2523
2524 if (~bp->attn_state & deasserted)
2525 BNX2X_ERR("IGU ERROR\n");
2526
2527 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2528 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2529
2530 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2531 aeu_mask = REG_RD(bp, reg_addr);
2532
2533 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2534 aeu_mask, deasserted);
2535 aeu_mask |= (deasserted & 0x3ff);
2536 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2537
2538 REG_WR(bp, reg_addr, aeu_mask);
2539 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2540
2541 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2542 bp->attn_state &= ~deasserted;
2543 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2544}
2545
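/*
 * Compare the attention bits reported in the default status block with
 * the last acknowledged value and the driver's attn_state to derive which
 * bits were newly asserted and which were deasserted, then handle each set.
 */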
2546static void bnx2x_attn_int(struct bnx2x *bp)
2547{
2548 /* read local copy of bits */
2549 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2550 attn_bits);
2551 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2552 attn_bits_ack);
2553 u32 attn_state = bp->attn_state;
2554
2555 /* look for changed bits */
2556 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2557 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2558
2559 DP(NETIF_MSG_HW,
2560 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2561 attn_bits, attn_ack, asserted, deasserted);
2562
2563 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2564 BNX2X_ERR("BAD attention state\n");
2565
2566 /* handle bits that were raised */
2567 if (asserted)
2568 bnx2x_attn_int_asserted(bp, asserted);
2569
2570 if (deasserted)
2571 bnx2x_attn_int_deasserted(bp, deasserted);
2572}
2573
2574static void bnx2x_sp_task(struct work_struct *work)
2575{
2576 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2577 u16 status;
2578
2579 /* Return here if interrupt is disabled */
2580 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2581 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2582 return;
2583 }
2584
2585 status = bnx2x_update_dsb_idx(bp);
2586/* if (status == 0) */
2587/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2588
2589 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2590
2591 /* HW attentions */
2592 if (status & 0x1) {
2593 bnx2x_attn_int(bp);
2594 status &= ~0x1;
2595 }
2596
2597 /* CStorm events: STAT_QUERY */
2598 if (status & 0x2) {
2599 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2600 status &= ~0x2;
2601 }
2602
2603 if (unlikely(status))
2604 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2605 status);
2606
2607 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2608 IGU_INT_NOP, 1);
2609 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2610 IGU_INT_NOP, 1);
2611 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2612 IGU_INT_NOP, 1);
2613 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2614 IGU_INT_NOP, 1);
2615 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2616 IGU_INT_ENABLE, 1);
2617}
2618
2619irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2620{
2621 struct net_device *dev = dev_instance;
2622 struct bnx2x *bp = netdev_priv(dev);
2623
2624 /* Return here if interrupt is disabled */
2625 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2626 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2627 return IRQ_HANDLED;
2628 }
2629
2630 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2631
2632#ifdef BNX2X_STOP_ON_ERROR
2633 if (unlikely(bp->panic))
2634 return IRQ_HANDLED;
2635#endif
2636
2637#ifdef BCM_CNIC
2638 {
2639 struct cnic_ops *c_ops;
2640
2641 rcu_read_lock();
2642 c_ops = rcu_dereference(bp->cnic_ops);
2643 if (c_ops)
2644 c_ops->cnic_handler(bp->cnic_data, NULL);
2645 rcu_read_unlock();
2646 }
2647#endif
2648 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2649
2650 return IRQ_HANDLED;
2651}
2652
2653/* end of slow path */
2654
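/*
 * Periodic timer: optionally polls the first queue (poll mode), advances
 * the driver pulse sequence in shared memory and compares it against the
 * MCP pulse to detect a lost heartbeat, then requests a statistics update
 * while the device is open.
 */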
2655static void bnx2x_timer(unsigned long data)
2656{
2657 struct bnx2x *bp = (struct bnx2x *) data;
2658
2659 if (!netif_running(bp->dev))
2660 return;
2661
2662 if (atomic_read(&bp->intr_sem) != 0)
2663 goto timer_restart;
2664
2665 if (poll) {
2666 struct bnx2x_fastpath *fp = &bp->fp[0];
2667 int rc;
2668
2669 bnx2x_tx_int(fp);
2670 rc = bnx2x_rx_int(fp, 1000);
2671 }
2672
2673 if (!BP_NOMCP(bp)) {
2674 int func = BP_FUNC(bp);
2675 u32 drv_pulse;
2676 u32 mcp_pulse;
2677
2678 ++bp->fw_drv_pulse_wr_seq;
2679 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2680 /* TBD - add SYSTEM_TIME */
2681 drv_pulse = bp->fw_drv_pulse_wr_seq;
2682 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
2683
2684 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2685 MCP_PULSE_SEQ_MASK);
2686 /* The delta between driver pulse and mcp response
2687 * should be 1 (before mcp response) or 0 (after mcp response)
2688 */
2689 if ((drv_pulse != mcp_pulse) &&
2690 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2691 /* someone lost a heartbeat... */
2692 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2693 drv_pulse, mcp_pulse);
2694 }
2695 }
2696
2697 if (bp->state == BNX2X_STATE_OPEN)
2698 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
2699
2700timer_restart:
2701 mod_timer(&bp->timer, jiffies + bp->current_interval);
2702}
2703
2704/* end of Statistics */
2705
2706/* nic init */
2707
2708/*
2709 * nic init service functions
2710 */
2711
2712static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
2713{
2714 int port = BP_PORT(bp);
2715
2716 /* "CSTORM" */
2717 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2718 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2719 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2720 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2721 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2722 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2723}
2724
2725void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2726 dma_addr_t mapping, int sb_id)
2727{
2728 int port = BP_PORT(bp);
2729 int func = BP_FUNC(bp);
2730 int index;
2731 u64 section;
2732
2733 /* USTORM */
2734 section = ((u64)mapping) + offsetof(struct host_status_block,
2735 u_status_block);
2736 sb->u_status_block.status_block_id = sb_id;
2737
2738 REG_WR(bp, BAR_CSTRORM_INTMEM +
2739 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2740 REG_WR(bp, BAR_CSTRORM_INTMEM +
2741 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2742 U64_HI(section));
2743 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2744 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2745
2746 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2747 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2748 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2749
2750 /* CSTORM */
2751 section = ((u64)mapping) + offsetof(struct host_status_block,
2752 c_status_block);
2753 sb->c_status_block.status_block_id = sb_id;
2754
2755 REG_WR(bp, BAR_CSTRORM_INTMEM +
2756 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2757 REG_WR(bp, BAR_CSTRORM_INTMEM +
2758 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2759 U64_HI(section));
2760 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2761 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2762
2763 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2764 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2765 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
2766
2767 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2768}
2769
2770static void bnx2x_zero_def_sb(struct bnx2x *bp)
2771{
2772 int func = BP_FUNC(bp);
2773
2774 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
2775 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2776 sizeof(struct tstorm_def_status_block)/4);
2777 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2778 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2779 sizeof(struct cstorm_def_status_block_u)/4);
2780 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2781 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2782 sizeof(struct cstorm_def_status_block_c)/4);
2783 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2784 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2785 sizeof(struct xstorm_def_status_block)/4);
2786}
2787
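/*
 * Set up the default status block: cache the per-group AEU enable masks
 * in bp->attn_group[], point the HC attention message registers at the
 * ATTN section, and program the U/C/T/X storm sections with the DMA
 * address of their part of the block.
 */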
2788static void bnx2x_init_def_sb(struct bnx2x *bp,
2789 struct host_def_status_block *def_sb,
2790 dma_addr_t mapping, int sb_id)
2791{
2792 int port = BP_PORT(bp);
2793 int func = BP_FUNC(bp);
2794 int index, val, reg_offset;
2795 u64 section;
2796
2797 /* ATTN */
2798 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2799 atten_status_block);
2800 def_sb->atten_status_block.status_block_id = sb_id;
2801
2802 bp->attn_state = 0;
2803
2804 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2805 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2806
2807 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2808 bp->attn_group[index].sig[0] = REG_RD(bp,
2809 reg_offset + 0x10*index);
2810 bp->attn_group[index].sig[1] = REG_RD(bp,
2811 reg_offset + 0x4 + 0x10*index);
2812 bp->attn_group[index].sig[2] = REG_RD(bp,
2813 reg_offset + 0x8 + 0x10*index);
2814 bp->attn_group[index].sig[3] = REG_RD(bp,
2815 reg_offset + 0xc + 0x10*index);
2816 }
2817
2818 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2819 HC_REG_ATTN_MSG0_ADDR_L);
2820
2821 REG_WR(bp, reg_offset, U64_LO(section));
2822 REG_WR(bp, reg_offset + 4, U64_HI(section));
2823
2824 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2825
2826 val = REG_RD(bp, reg_offset);
2827 val |= sb_id;
2828 REG_WR(bp, reg_offset, val);
2829
2830 /* USTORM */
2831 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2832 u_def_status_block);
2833 def_sb->u_def_status_block.status_block_id = sb_id;
2834
2835 REG_WR(bp, BAR_CSTRORM_INTMEM +
2836 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2837 REG_WR(bp, BAR_CSTRORM_INTMEM +
2838 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2839 U64_HI(section));
2840 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2841 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2842
2843 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2844 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2845 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2846
2847 /* CSTORM */
2848 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2849 c_def_status_block);
2850 def_sb->c_def_status_block.status_block_id = sb_id;
2851
2852 REG_WR(bp, BAR_CSTRORM_INTMEM +
2853 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2854 REG_WR(bp, BAR_CSTRORM_INTMEM +
2855 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2856 U64_HI(section));
2857 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2858 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2859
2860 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2861 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2862 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2863
2864 /* TSTORM */
2865 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2866 t_def_status_block);
2867 def_sb->t_def_status_block.status_block_id = sb_id;
2868
2869 REG_WR(bp, BAR_TSTRORM_INTMEM +
2870 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2871 REG_WR(bp, BAR_TSTRORM_INTMEM +
2872 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2873 U64_HI(section));
2874 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2875 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2876
2877 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2878 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2879 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2880
2881 /* XSTORM */
2882 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2883 x_def_status_block);
2884 def_sb->x_def_status_block.status_block_id = sb_id;
2885
2886 REG_WR(bp, BAR_XSTRORM_INTMEM +
2887 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2888 REG_WR(bp, BAR_XSTRORM_INTMEM +
2889 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2890 U64_HI(section));
2891 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2892 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2893
2894 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2895 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2896 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2897
2898 bp->stats_pending = 0;
2899 bp->set_mac_pending = 0;
2900
2901 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2902}
2903
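/*
 * Program the host coalescing timeout for the Rx and Tx CQ indices of
 * every queue; a computed timeout of 0 ticks disables coalescing for
 * that index via the corresponding HC_DISABLE entry.
 */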
2904void bnx2x_update_coalesce(struct bnx2x *bp)
2905{
2906 int port = BP_PORT(bp);
2907 int i;
2908
2909 for_each_queue(bp, i) {
2910 int sb_id = bp->fp[i].sb_id;
2911
2912 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2913 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2914 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2915 U_SB_ETH_RX_CQ_INDEX),
2916 bp->rx_ticks/(4 * BNX2X_BTR));
2917 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2918 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2919 U_SB_ETH_RX_CQ_INDEX),
2920 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2921
2922 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2923 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2924 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2925 C_SB_ETH_TX_CQ_INDEX),
2926 bp->tx_ticks/(4 * BNX2X_BTR));
2927 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2928 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2929 C_SB_ETH_TX_CQ_INDEX),
2930 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2931 }
2932}
2933
2934static void bnx2x_init_sp_ring(struct bnx2x *bp)
2935{
2936 int func = BP_FUNC(bp);
2937
2938 spin_lock_init(&bp->spq_lock);
2939
2940 bp->spq_left = MAX_SPQ_PENDING;
2941 bp->spq_prod_idx = 0;
2942 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2943 bp->spq_prod_bd = bp->spq;
2944 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2945
2946 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2947 U64_LO(bp->spq_mapping));
2948 REG_WR(bp,
2949 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2950 U64_HI(bp->spq_mapping));
2951
2952 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2953 bp->spq_prod_idx);
2954}
2955
2956static void bnx2x_init_context(struct bnx2x *bp)
2957{
2958 int i;
2959
2960 /* Rx */
2961 for_each_queue(bp, i) {
2962 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2963 struct bnx2x_fastpath *fp = &bp->fp[i];
2964 u8 cl_id = fp->cl_id;
2965
2966 context->ustorm_st_context.common.sb_index_numbers =
2967 BNX2X_RX_SB_INDEX_NUM;
2968 context->ustorm_st_context.common.clientId = cl_id;
2969 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2970 context->ustorm_st_context.common.flags =
2971 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2972 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
2973 context->ustorm_st_context.common.statistics_counter_id =
2974 cl_id;
2975 context->ustorm_st_context.common.mc_alignment_log_size =
2976 BNX2X_RX_ALIGN_SHIFT;
2977 context->ustorm_st_context.common.bd_buff_size =
2978 bp->rx_buf_size;
2979 context->ustorm_st_context.common.bd_page_base_hi =
2980 U64_HI(fp->rx_desc_mapping);
2981 context->ustorm_st_context.common.bd_page_base_lo =
2982 U64_LO(fp->rx_desc_mapping);
2983 if (!fp->disable_tpa) {
2984 context->ustorm_st_context.common.flags |=
2985 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
2986 context->ustorm_st_context.common.sge_buff_size =
2987 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
2988 0xffff);
2989 context->ustorm_st_context.common.sge_page_base_hi =
2990 U64_HI(fp->rx_sge_mapping);
2991 context->ustorm_st_context.common.sge_page_base_lo =
2992 U64_LO(fp->rx_sge_mapping);
2993
2994 context->ustorm_st_context.common.max_sges_for_packet =
2995 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
2996 context->ustorm_st_context.common.max_sges_for_packet =
2997 ((context->ustorm_st_context.common.
2998 max_sges_for_packet + PAGES_PER_SGE - 1) &
2999 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
3000 }
3001
3002 context->ustorm_ag_context.cdu_usage =
3003 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3004 CDU_REGION_NUMBER_UCM_AG,
3005 ETH_CONNECTION_TYPE);
3006
3007 context->xstorm_ag_context.cdu_reserved =
3008 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3009 CDU_REGION_NUMBER_XCM_AG,
3010 ETH_CONNECTION_TYPE);
3011 }
3012
3013 /* Tx */
3014 for_each_queue(bp, i) {
3015 struct bnx2x_fastpath *fp = &bp->fp[i];
3016 struct eth_context *context =
3017 bnx2x_sp(bp, context[i].eth);
3018
3019 context->cstorm_st_context.sb_index_number =
3020 C_SB_ETH_TX_CQ_INDEX;
3021 context->cstorm_st_context.status_block_id = fp->sb_id;
3022
3023 context->xstorm_st_context.tx_bd_page_base_hi =
3024 U64_HI(fp->tx_desc_mapping);
3025 context->xstorm_st_context.tx_bd_page_base_lo =
3026 U64_LO(fp->tx_desc_mapping);
3027 context->xstorm_st_context.statistics_data = (fp->cl_id |
3028 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3029 }
3030}
3031
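/*
 * Fill the TSTORM RSS indirection table, spreading the entries round-robin
 * over the client IDs of the active queues; nothing to do when RSS is
 * disabled.
 */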
3032static void bnx2x_init_ind_table(struct bnx2x *bp)
3033{
3034 int func = BP_FUNC(bp);
3035 int i;
3036
3037 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3038 return;
3039
3040 DP(NETIF_MSG_IFUP,
3041 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
3042 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3043 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3044 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3045 bp->fp->cl_id + (i % bp->num_queues));
3046}
3047
3048void bnx2x_set_client_config(struct bnx2x *bp)
3049{
3050 struct tstorm_eth_client_config tstorm_client = {0};
3051 int port = BP_PORT(bp);
3052 int i;
3053
3054 tstorm_client.mtu = bp->dev->mtu;
3055 tstorm_client.config_flags =
3056 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3057 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3058#ifdef BCM_VLAN
3059 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3060 tstorm_client.config_flags |=
3061 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3062 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3063 }
3064#endif
3065
3066 for_each_queue(bp, i) {
3067 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3068
3069 REG_WR(bp, BAR_TSTRORM_INTMEM +
3070 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3071 ((u32 *)&tstorm_client)[0]);
3072 REG_WR(bp, BAR_TSTRORM_INTMEM +
3073 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3074 ((u32 *)&tstorm_client)[1]);
3075 }
3076
3077 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3078 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3079}
3080
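/*
 * Translate bp->rx_mode into the TSTORM MAC filter configuration and the
 * NIG LLH drop mask, then (unless Rx is disabled) push the per-client
 * configuration as well.
 */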
3081void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3082{
3083 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3084 int mode = bp->rx_mode;
3085 int mask = bp->rx_mode_cl_mask;
3086 int func = BP_FUNC(bp);
3087 int port = BP_PORT(bp);
3088 int i;
3089 /* All but management unicast packets should pass to the host as well */
3090 u32 llh_mask =
3091 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3092 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3093 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3094 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3095
3096 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
3097
3098 switch (mode) {
3099 case BNX2X_RX_MODE_NONE: /* no Rx */
3100 tstorm_mac_filter.ucast_drop_all = mask;
3101 tstorm_mac_filter.mcast_drop_all = mask;
3102 tstorm_mac_filter.bcast_drop_all = mask;
3103 break;
3104
3105 case BNX2X_RX_MODE_NORMAL:
3106 tstorm_mac_filter.bcast_accept_all = mask;
3107 break;
3108
3109 case BNX2X_RX_MODE_ALLMULTI:
3110 tstorm_mac_filter.mcast_accept_all = mask;
3111 tstorm_mac_filter.bcast_accept_all = mask;
3112 break;
3113
3114 case BNX2X_RX_MODE_PROMISC:
3115 tstorm_mac_filter.ucast_accept_all = mask;
3116 tstorm_mac_filter.mcast_accept_all = mask;
3117 tstorm_mac_filter.bcast_accept_all = mask;
3118 /* pass management unicast packets as well */
3119 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3120 break;
3121
3122 default:
3123 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3124 break;
3125 }
3126
3127 REG_WR(bp,
3128 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3129 llh_mask);
3130
3131 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3132 REG_WR(bp, BAR_TSTRORM_INTMEM +
3133 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
3134 ((u32 *)&tstorm_mac_filter)[i]);
3135
3136/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3137 ((u32 *)&tstorm_mac_filter)[i]); */
3138 }
3139
3140 if (mode != BNX2X_RX_MODE_NONE)
3141 bnx2x_set_client_config(bp);
3142}
3143
3144static void bnx2x_init_internal_common(struct bnx2x *bp)
3145{
3146 int i;
3147
3148 /* Zero this manually as its initialization is
3149 currently missing in the initTool */
3150 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3151 REG_WR(bp, BAR_USTRORM_INTMEM +
3152 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3153}
3154
3155static void bnx2x_init_internal_port(struct bnx2x *bp)
3156{
3157 int port = BP_PORT(bp);
3158
3159 REG_WR(bp,
3160 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3161 REG_WR(bp,
3162 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3163 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3164 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3165}
3166
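/*
 * Per-function internal memory init: RSS/TPA configuration, initial Rx
 * mode, per-client statistics reset, statistics collection addresses,
 * CQ page bases and aggregation sizes, dropless flow control thresholds
 * (E1H) and, in multi-function mode, rate shaping/fairness setup.
 */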
3167static void bnx2x_init_internal_func(struct bnx2x *bp)
3168{
3169 struct tstorm_eth_function_common_config tstorm_config = {0};
3170 struct stats_indication_flags stats_flags = {0};
3171 int port = BP_PORT(bp);
3172 int func = BP_FUNC(bp);
3173 int i, j;
3174 u32 offset;
3175 u16 max_agg_size;
3176
3177 tstorm_config.config_flags = RSS_FLAGS(bp);
3178
3179 if (is_multi(bp))
3180 tstorm_config.rss_result_mask = MULTI_MASK;
3181
3182 /* Enable TPA if needed */
3183 if (bp->flags & TPA_ENABLE_FLAG)
3184 tstorm_config.config_flags |=
3185 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3186
3187 if (IS_E1HMF(bp))
3188 tstorm_config.config_flags |=
3189 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3190
3191 tstorm_config.leading_client_id = BP_L_ID(bp);
3192
3193 REG_WR(bp, BAR_TSTRORM_INTMEM +
3194 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3195 (*(u32 *)&tstorm_config));
3196
3197 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3198 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3199 bnx2x_set_storm_rx_mode(bp);
3200
3201 for_each_queue(bp, i) {
3202 u8 cl_id = bp->fp[i].cl_id;
3203
3204 /* reset xstorm per client statistics */
3205 offset = BAR_XSTRORM_INTMEM +
3206 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3207 for (j = 0;
3208 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3209 REG_WR(bp, offset + j*4, 0);
3210
3211 /* reset tstorm per client statistics */
3212 offset = BAR_TSTRORM_INTMEM +
3213 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3214 for (j = 0;
3215 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3216 REG_WR(bp, offset + j*4, 0);
3217
3218 /* reset ustorm per client statistics */
3219 offset = BAR_USTRORM_INTMEM +
3220 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3221 for (j = 0;
3222 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3223 REG_WR(bp, offset + j*4, 0);
3224 }
3225
3226 /* Init statistics related context */
3227 stats_flags.collect_eth = 1;
3228
3229 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3230 ((u32 *)&stats_flags)[0]);
3231 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3232 ((u32 *)&stats_flags)[1]);
3233
3234 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3235 ((u32 *)&stats_flags)[0]);
3236 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3237 ((u32 *)&stats_flags)[1]);
3238
3239 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3240 ((u32 *)&stats_flags)[0]);
3241 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3242 ((u32 *)&stats_flags)[1]);
3243
3244 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3245 ((u32 *)&stats_flags)[0]);
3246 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3247 ((u32 *)&stats_flags)[1]);
3248
3249 REG_WR(bp, BAR_XSTRORM_INTMEM +
3250 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3251 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3252 REG_WR(bp, BAR_XSTRORM_INTMEM +
3253 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3254 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3255
3256 REG_WR(bp, BAR_TSTRORM_INTMEM +
3257 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3258 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3259 REG_WR(bp, BAR_TSTRORM_INTMEM +
3260 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3261 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3262
3263 REG_WR(bp, BAR_USTRORM_INTMEM +
3264 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3265 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3266 REG_WR(bp, BAR_USTRORM_INTMEM +
3267 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3268 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3269
3270 if (CHIP_IS_E1H(bp)) {
3271 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3272 IS_E1HMF(bp));
3273 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3274 IS_E1HMF(bp));
3275 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3276 IS_E1HMF(bp));
3277 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3278 IS_E1HMF(bp));
3279
3280 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3281 bp->e1hov);
3282 }
3283
3284 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
3285 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3286 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
3287 for_each_queue(bp, i) {
3288 struct bnx2x_fastpath *fp = &bp->fp[i];
3289
3290 REG_WR(bp, BAR_USTRORM_INTMEM +
3291 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3292 U64_LO(fp->rx_comp_mapping));
3293 REG_WR(bp, BAR_USTRORM_INTMEM +
3294 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3295 U64_HI(fp->rx_comp_mapping));
3296
3297 /* Next page */
3298 REG_WR(bp, BAR_USTRORM_INTMEM +
3299 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3300 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3301 REG_WR(bp, BAR_USTRORM_INTMEM +
3302 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3303 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3304
3305 REG_WR16(bp, BAR_USTRORM_INTMEM +
3306 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3307 max_agg_size);
3308 }
3309
3310 /* dropless flow control */
3311 if (CHIP_IS_E1H(bp)) {
3312 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3313
3314 rx_pause.bd_thr_low = 250;
3315 rx_pause.cqe_thr_low = 250;
3316 rx_pause.cos = 1;
3317 rx_pause.sge_thr_low = 0;
3318 rx_pause.bd_thr_high = 350;
3319 rx_pause.cqe_thr_high = 350;
3320 rx_pause.sge_thr_high = 0;
3321
3322 for_each_queue(bp, i) {
3323 struct bnx2x_fastpath *fp = &bp->fp[i];
3324
3325 if (!fp->disable_tpa) {
3326 rx_pause.sge_thr_low = 150;
3327 rx_pause.sge_thr_high = 250;
3328 }
3329
3330
3331 offset = BAR_USTRORM_INTMEM +
3332 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3333 fp->cl_id);
3334 for (j = 0;
3335 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3336 j++)
3337 REG_WR(bp, offset + j*4,
3338 ((u32 *)&rx_pause)[j]);
3339 }
3340 }
3341
3342 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3343
3344 /* Init rate shaping and fairness contexts */
3345 if (IS_E1HMF(bp)) {
3346 int vn;
3347
3348 /* During init there is no active link.
3349 Until the link is up, set the link rate to 10Gbps */
3350 bp->link_vars.line_speed = SPEED_10000;
3351 bnx2x_init_port_minmax(bp);
3352
3353 if (!BP_NOMCP(bp))
3354 bp->mf_config =
3355 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3356 bnx2x_calc_vn_weight_sum(bp);
3357
3358 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3359 bnx2x_init_vn_minmax(bp, 2*vn + port);
3360
3361 /* Enable rate shaping and fairness */
3362 bp->cmng.flags.cmng_enables |=
3363 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3364
3365 } else {
3366 /* rate shaping and fairness are disabled */
3367 DP(NETIF_MSG_IFUP,
3368 "single function mode minmax will be disabled\n");
3369 }
3370
3371
3372 /* Store cmng structures to internal memory */
3373 if (bp->port.pmf)
3374 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3375 REG_WR(bp, BAR_XSTRORM_INTMEM +
3376 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3377 ((u32 *)(&bp->cmng))[i]);
3378}
3379
3380static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3381{
3382 switch (load_code) {
3383 case FW_MSG_CODE_DRV_LOAD_COMMON:
3384 bnx2x_init_internal_common(bp);
3385 /* no break */
3386
3387 case FW_MSG_CODE_DRV_LOAD_PORT:
3388 bnx2x_init_internal_port(bp);
3389 /* no break */
3390
3391 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3392 bnx2x_init_internal_func(bp);
3393 break;
3394
3395 default:
3396 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3397 break;
3398 }
3399}
3400
3401void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3402{
3403 int i;
3404
3405 for_each_queue(bp, i) {
3406 struct bnx2x_fastpath *fp = &bp->fp[i];
3407
3408 fp->bp = bp;
3409 fp->state = BNX2X_FP_STATE_CLOSED;
3410 fp->index = i;
3411 fp->cl_id = BP_L_ID(bp) + i;
3412#ifdef BCM_CNIC
3413 fp->sb_id = fp->cl_id + 1;
3414#else
3415 fp->sb_id = fp->cl_id;
3416#endif
3417 DP(NETIF_MSG_IFUP,
3418 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3419 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3420 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3421 fp->sb_id);
3422 bnx2x_update_fpsb_idx(fp);
3423 }
3424
3425 /* ensure status block indices were read */
3426 rmb();
3427
3428
3429 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3430 DEF_SB_ID);
3431 bnx2x_update_dsb_idx(bp);
3432 bnx2x_update_coalesce(bp);
3433 bnx2x_init_rx_rings(bp);
3434 bnx2x_init_tx_ring(bp);
3435 bnx2x_init_sp_ring(bp);
3436 bnx2x_init_context(bp);
3437 bnx2x_init_internal(bp, load_code);
3438 bnx2x_init_ind_table(bp);
3439 bnx2x_stats_init(bp);
3440
3441 /* At this point, we are ready for interrupts */
3442 atomic_set(&bp->intr_sem, 0);
3443
3444 /* flush all before enabling interrupts */
3445 mb();
3446 mmiowb();
3447
3448 bnx2x_int_enable(bp);
3449
3450 /* Check for SPIO5 */
3451 bnx2x_attn_int_deasserted0(bp,
3452 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3453 AEU_INPUTS_ATTN_BITS_SPIO5);
3454}
3455
3456/* end of nic init */
3457
3458/*
3459 * gzip service functions
3460 */
3461
3462static int bnx2x_gunzip_init(struct bnx2x *bp)
3463{
3464 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3465 &bp->gunzip_mapping, GFP_KERNEL);
3466 if (bp->gunzip_buf == NULL)
3467 goto gunzip_nomem1;
3468
3469 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3470 if (bp->strm == NULL)
3471 goto gunzip_nomem2;
3472
3473 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3474 GFP_KERNEL);
3475 if (bp->strm->workspace == NULL)
3476 goto gunzip_nomem3;
3477
3478 return 0;
3479
3480gunzip_nomem3:
3481 kfree(bp->strm);
3482 bp->strm = NULL;
3483
3484gunzip_nomem2:
3485 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3486 bp->gunzip_mapping);
3487 bp->gunzip_buf = NULL;
3488
3489gunzip_nomem1:
3490 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3491 " un-compression\n");
3492 return -ENOMEM;
3493}
3494
3495static void bnx2x_gunzip_end(struct bnx2x *bp)
3496{
3497 kfree(bp->strm->workspace);
3498
3499 kfree(bp->strm);
3500 bp->strm = NULL;
3501
3502 if (bp->gunzip_buf) {
3503 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3504 bp->gunzip_mapping);
3505 bp->gunzip_buf = NULL;
3506 }
3507}
3508
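/*
 * Decompress a gzip'ed firmware blob: skip the 10-byte gzip header (and
 * the optional original file name field), inflate the raw deflate stream
 * into gunzip_buf and record the output length in 32-bit words; returns 0
 * only if the whole stream was consumed (Z_STREAM_END).
 */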
3509static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
3510{
3511 int n, rc;
3512
3513 /* check gzip header */
3514 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3515 BNX2X_ERR("Bad gzip header\n");
3516 return -EINVAL;
3517 }
3518
3519 n = 10;
3520
3521#define FNAME 0x8
3522
3523 if (zbuf[3] & FNAME)
3524 while ((zbuf[n++] != 0) && (n < len));
3525
3526 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
3527 bp->strm->avail_in = len - n;
3528 bp->strm->next_out = bp->gunzip_buf;
3529 bp->strm->avail_out = FW_BUF_SIZE;
3530
3531 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3532 if (rc != Z_OK)
3533 return rc;
3534
3535 rc = zlib_inflate(bp->strm, Z_FINISH);
3536 if ((rc != Z_OK) && (rc != Z_STREAM_END))
3537 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3538 bp->strm->msg);
3539
3540 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3541 if (bp->gunzip_outlen & 0x3)
3542 netdev_err(bp->dev, "Firmware decompression error:"
3543 " gunzip_outlen (%d) not aligned\n",
3544 bp->gunzip_outlen);
3545 bp->gunzip_outlen >>= 2;
3546
3547 zlib_inflateEnd(bp->strm);
3548
3549 if (rc == Z_STREAM_END)
3550 return 0;
3551
3552 return rc;
3553}
3554
3555/* nic load/unload */
3556
3557/*
3558 * General service functions
3559 */
3560
3561/* send a NIG loopback debug packet */
3562static void bnx2x_lb_pckt(struct bnx2x *bp)
3563{
3564 u32 wb_write[3];
3565
3566 /* Ethernet source and destination addresses */
3567 wb_write[0] = 0x55555555;
3568 wb_write[1] = 0x55555555;
3569 wb_write[2] = 0x20; /* SOP */
3570 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3571
3572 /* NON-IP protocol */
3573 wb_write[0] = 0x09000000;
3574 wb_write[1] = 0x55555555;
3575 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
3576 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3577}
3578
3579/* Some of the internal memories
3580 * are not directly readable from the driver;
3581 * to test them we send debug packets.
3582 */
3583static int bnx2x_int_mem_test(struct bnx2x *bp)
3584{
3585 int factor;
3586 int count, i;
3587 u32 val = 0;
3588
3589 if (CHIP_REV_IS_FPGA(bp))
3590 factor = 120;
3591 else if (CHIP_REV_IS_EMUL(bp))
3592 factor = 200;
3593 else
3594 factor = 1;
3595
3596 DP(NETIF_MSG_HW, "start part1\n");
3597
3598 /* Disable inputs of parser neighbor blocks */
3599 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3600 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3601 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3602 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3603
3604 /* Write 0 to parser credits for CFC search request */
3605 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3606
3607 /* send Ethernet packet */
3608 bnx2x_lb_pckt(bp);
3609
3610 /* TODO: should the NIG statistics be reset here? */
3611 /* Wait until NIG register shows 1 packet of size 0x10 */
3612 count = 1000 * factor;
3613 while (count) {
3614
3615 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3616 val = *bnx2x_sp(bp, wb_data[0]);
3617 if (val == 0x10)
3618 break;
3619
3620 msleep(10);
3621 count--;
3622 }
3623 if (val != 0x10) {
3624 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3625 return -1;
3626 }
3627
3628 /* Wait until PRS register shows 1 packet */
3629 count = 1000 * factor;
3630 while (count) {
3631 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3632 if (val == 1)
3633 break;
3634
3635 msleep(10);
3636 count--;
3637 }
3638 if (val != 0x1) {
3639 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3640 return -2;
3641 }
3642
3643 /* Reset and init BRB, PRS */
3644 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3645 msleep(50);
3646 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3647 msleep(50);
3648 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3649 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3650
3651 DP(NETIF_MSG_HW, "part2\n");
3652
3653 /* Disable inputs of parser neighbor blocks */
3654 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3655 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3656 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3657 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3658
3659 /* Write 0 to parser credits for CFC search request */
3660 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3661
3662 /* send 10 Ethernet packets */
3663 for (i = 0; i < 10; i++)
3664 bnx2x_lb_pckt(bp);
3665
3666 /* Wait until NIG register shows 10 + 1
3667 packets of size 11*0x10 = 0xb0 */
3668 count = 1000 * factor;
3669 while (count) {
3670
3671 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3672 val = *bnx2x_sp(bp, wb_data[0]);
3673 if (val == 0xb0)
3674 break;
3675
3676 msleep(10);
3677 count--;
3678 }
3679 if (val != 0xb0) {
3680 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3681 return -3;
3682 }
3683
3684 /* Wait until PRS register shows 2 packets */
3685 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3686 if (val != 2)
3687 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3688
3689 /* Write 1 to parser credits for CFC search request */
3690 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3691
3692 /* Wait until PRS register shows 3 packets */
3693 msleep(10 * factor);
3694 /* Check that the PRS register now shows 3 packets */
3695 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3696 if (val != 3)
3697 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3698
3699 /* clear NIG EOP FIFO */
3700 for (i = 0; i < 11; i++)
3701 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3702 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3703 if (val != 1) {
3704 BNX2X_ERR("clear of NIG failed\n");
3705 return -4;
3706 }
3707
3708 /* Reset and init BRB, PRS, NIG */
3709 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3710 msleep(50);
3711 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3712 msleep(50);
3713 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3714 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3715#ifndef BCM_CNIC
3716 /* set NIC mode */
3717 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3718#endif
3719
3720 /* Enable inputs of parser neighbor blocks */
3721 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3722 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3723 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3724 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
3725
3726 DP(NETIF_MSG_HW, "done\n");
3727
3728 return 0; /* OK */
3729}
3730
3731static void enable_blocks_attention(struct bnx2x *bp)
3732{
3733 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3734 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3735 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3736 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3737 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3738 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3739 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3740 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3741 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3742/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3743/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3744 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3745 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3746 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3747/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3748/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3749 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3750 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3751 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3752 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3753/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3754/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3755 if (CHIP_REV_IS_FPGA(bp))
3756 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3757 else
3758 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
3759 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3760 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3761 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3762/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3763/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3764 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3765 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3766/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3767 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
3768}
3769
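/*
 * Parity mask values written by enable_blocks_parity(): a value of 0
 * unmasks (enables) every parity source in the block, while non-zero
 * values leave the noted bits masked.
 */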
3770static const struct {
3771 u32 addr;
3772 u32 mask;
3773} bnx2x_parity_mask[] = {
3774 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3775 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3776 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3777 {HC_REG_HC_PRTY_MASK, 0xffffffff},
3778 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3779 {QM_REG_QM_PRTY_MASK, 0x0},
3780 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3781 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3782 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3783 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3784 {CDU_REG_CDU_PRTY_MASK, 0x0},
3785 {CFC_REG_CFC_PRTY_MASK, 0x0},
3786 {DBG_REG_DBG_PRTY_MASK, 0x0},
3787 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3788 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3789 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3790 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3791 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3792 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3793 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3794 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3795 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3796 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3797 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3798 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3799 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3800 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3801 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3802};
3803
3804static void enable_blocks_parity(struct bnx2x *bp)
3805{
3806 int i;
3807 int mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
3808
3809 for (i = 0; i < mask_arr_len; i++)
3810 REG_WR(bp, bnx2x_parity_mask[i].addr,
3811 bnx2x_parity_mask[i].mask);
3812}
3813
3814
3815static void bnx2x_reset_common(struct bnx2x *bp)
3816{
3817 /* reset_common */
3818 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3819 0xd3ffff7f);
3820 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3821}
3822
3823static void bnx2x_init_pxp(struct bnx2x *bp)
3824{
3825 u16 devctl;
3826 int r_order, w_order;
3827
3828 pci_read_config_word(bp->pdev,
3829 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3830 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3831 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3832 if (bp->mrrs == -1)
3833 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3834 else {
3835 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3836 r_order = bp->mrrs;
3837 }
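	/*
	 * Worked example (illustrative values only): PCI_EXP_DEVCTL_PAYLOAD
	 * covers bits 7:5 and PCI_EXP_DEVCTL_READRQ bits 14:12 of the PCIe
	 * Device Control register, each encoding 128 << field bytes. A
	 * devctl of 0x2020 would thus give w_order = 1 (256 byte max
	 * payload) and, with bp->mrrs left at -1, r_order = 2 (512 byte
	 * max read request).
	 */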
3838
3839 bnx2x_init_pxp_arb(bp, r_order, w_order);
3840}
3841
3842static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3843{
3844 int is_required;
3845 u32 val;
3846 int port;
3847
3848 if (BP_NOMCP(bp))
3849 return;
3850
3851 is_required = 0;
3852 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3853 SHARED_HW_CFG_FAN_FAILURE_MASK;
3854
3855 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3856 is_required = 1;
3857
3858 /*
3859 * The fan failure mechanism is usually related to the PHY type since
3860 * the power consumption of the board is affected by the PHY. Currently,
3861	 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3862 */
3863 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3864 for (port = PORT_0; port < PORT_MAX; port++) {
3865 u32 phy_type =
3866 SHMEM_RD(bp, dev_info.port_hw_config[port].
3867 external_phy_config) &
3868 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3869 is_required |=
3870 ((phy_type ==
3871 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
3872 (phy_type ==
3873 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
3874 (phy_type ==
3875 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3876 }
3877
3878 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3879
3880 if (is_required == 0)
3881 return;
3882
3883 /* Fan failure is indicated by SPIO 5 */
3884 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3885 MISC_REGISTERS_SPIO_INPUT_HI_Z);
3886
3887 /* set to active low mode */
3888 val = REG_RD(bp, MISC_REG_SPIO_INT);
3889 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3890 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3891 REG_WR(bp, MISC_REG_SPIO_INT, val);
3892
3893 /* enable interrupt to signal the IGU */
3894 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3895 val |= (1 << MISC_REGISTERS_SPIO_5);
3896 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3897}
3898
3899static int bnx2x_init_common(struct bnx2x *bp)
3900{
3901 u32 val, i;
3902#ifdef BCM_CNIC
3903 u32 wb_write[2];
3904#endif
3905
3906 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
3907
3908 bnx2x_reset_common(bp);
3909 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3910 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3911
3912 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
3913 if (CHIP_IS_E1H(bp))
3914 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
3915
3916 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3917 msleep(30);
3918 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3919
3920 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
3921 if (CHIP_IS_E1(bp)) {
3922 /* enable HW interrupt from PXP on USDM overflow
3923 bit 16 on INT_MASK_0 */
3924 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3925 }
3926
3927 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
3928 bnx2x_init_pxp(bp);
3929
3930#ifdef __BIG_ENDIAN
3931 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3932 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3933 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3934 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3935 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3936 /* make sure this value is 0 */
3937 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
3938
3939/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3940 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3941 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3942 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3943 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3944#endif
3945
3946 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
3947#ifdef BCM_CNIC
3948 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3949 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3950 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3951#endif
3952
3953 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3954 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
3955
3956	/* let the HW do its magic ... */
3957 msleep(100);
3958 /* finish PXP init */
3959 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3960 if (val != 1) {
3961 BNX2X_ERR("PXP2 CFG failed\n");
3962 return -EBUSY;
3963 }
3964 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3965 if (val != 1) {
3966 BNX2X_ERR("PXP2 RD_INIT failed\n");
3967 return -EBUSY;
3968 }
3969
3970 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3971 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3972
3973 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
3974
3975 /* clean the DMAE memory */
3976 bp->dmae_ready = 1;
3977 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
3978
3979 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3980 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3981 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3982 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
3983
3984 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3985 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3986 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3987 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3988
3989 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
3990
3991#ifdef BCM_CNIC
3992 wb_write[0] = 0;
3993 wb_write[1] = 0;
3994 for (i = 0; i < 64; i++) {
3995 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
3996 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
3997
3998 if (CHIP_IS_E1H(bp)) {
3999 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4000 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4001 wb_write, 2);
4002 }
4003 }
4004#endif
4005 /* soft reset pulse */
4006 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4007 REG_WR(bp, QM_REG_SOFT_RESET, 0);
4008
4009#ifdef BCM_CNIC
4010 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
4011#endif
4012
4013 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
4014 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4015 if (!CHIP_REV_IS_SLOW(bp)) {
4016 /* enable hw interrupt from doorbell Q */
4017 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4018 }
4019
4020 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4021 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4022 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4023#ifndef BCM_CNIC
4024 /* set NIC mode */
4025 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4026#endif
4027 if (CHIP_IS_E1H(bp))
4028 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
4029
4030 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4031 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4032 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4033 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
4034
4035 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4036 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4037 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4038 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4039
4040 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4041 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4042 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4043 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4044
4045 /* sync semi rtc */
4046 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4047 0x80000000);
4048 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4049 0x80000000);
4050
4051 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4052 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4053 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
4054
4055 REG_WR(bp, SRC_REG_SOFT_RST, 1);
4056 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4057 REG_WR(bp, i, random32());
4058 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4059#ifdef BCM_CNIC
4060 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4061 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4062 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4063 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4064 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4065 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4066 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4067 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4068 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4069 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4070#endif
4071 REG_WR(bp, SRC_REG_SOFT_RST, 0);
4072
4073 if (sizeof(union cdu_context) != 1024)
4074 /* we currently assume that a context is 1024 bytes */
4075 dev_alert(&bp->pdev->dev, "please adjust the size "
4076 "of cdu_context(%ld)\n",
4077 (long)sizeof(union cdu_context));
4078
4079 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
4080 val = (4 << 24) + (0 << 12) + 1024;
4081 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
4082
4083 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
4084 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
4085 /* enable context validation interrupt from CFC */
4086 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4087
4088 /* set the thresholds to prevent CFC/CDU race */
4089 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4090
4091 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4092 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4093
4094 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
4095 /* Reset PCIE errors for debug */
4096 REG_WR(bp, 0x2814, 0xffffffff);
4097 REG_WR(bp, 0x3820, 0xffffffff);
4098
4099 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4100 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4101 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4102 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4103
4104 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4105 if (CHIP_IS_E1H(bp)) {
4106 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4107 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4108 }
4109
4110 if (CHIP_REV_IS_SLOW(bp))
4111 msleep(200);
4112
4113 /* finish CFC init */
4114 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4115 if (val != 1) {
4116 BNX2X_ERR("CFC LL_INIT failed\n");
4117 return -EBUSY;
4118 }
4119 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4120 if (val != 1) {
4121 BNX2X_ERR("CFC AC_INIT failed\n");
4122 return -EBUSY;
4123 }
4124 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4125 if (val != 1) {
4126 BNX2X_ERR("CFC CAM_INIT failed\n");
4127 return -EBUSY;
4128 }
4129 REG_WR(bp, CFC_REG_DEBUG0, 0);
4130
4131 /* read NIG statistic
4132	   to see if this is our first bring-up since power-up */
4133 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4134 val = *bnx2x_sp(bp, wb_data[0]);
4135
4136 /* do internal memory self test */
4137 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4138 BNX2X_ERR("internal mem self test failed\n");
4139 return -EBUSY;
4140 }
4141
4142 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4143 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4144 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4145 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4147 bp->port.need_hw_lock = 1;
4148 break;
4149
4150 default:
4151 break;
4152 }
4153
4154 bnx2x_setup_fan_failure_detection(bp);
4155
4156 /* clear PXP2 attentions */
4157 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
4158
4159 enable_blocks_attention(bp);
4160 if (CHIP_PARITY_SUPPORTED(bp))
4161 enable_blocks_parity(bp);
4162
4163 if (!BP_NOMCP(bp)) {
4164 bnx2x_acquire_phy_lock(bp);
4165 bnx2x_common_init_phy(bp, bp->common.shmem_base);
4166 bnx2x_release_phy_lock(bp);
4167 } else
4168 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4169
4170 return 0;
4171}
4172
4173static int bnx2x_init_port(struct bnx2x *bp)
4174{
4175 int port = BP_PORT(bp);
4176 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
4177 u32 low, high;
4178 u32 val;
4179
4180 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
4181
4182 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
4183
4184 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4185 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4186
4187 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4188 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4189 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4190 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4191
4192#ifdef BCM_CNIC
4193 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
4194
4195 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4196 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4197 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
4198#endif
4199
4200 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4201
4202 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4203 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4204 /* no pause for emulation and FPGA */
4205 low = 0;
4206 high = 513;
4207 } else {
4208 if (IS_E1HMF(bp))
4209 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4210 else if (bp->dev->mtu > 4096) {
4211 if (bp->flags & ONE_PORT_FLAG)
4212 low = 160;
4213 else {
4214 val = bp->dev->mtu;
4215 /* (24*1024 + val*4)/256 */
4216 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4217 }
4218 } else
4219 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4220 high = low + 56; /* 14*1024/256 */
4221 }
4222 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4223 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
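	/*
	 * Worked example (illustrative values only): in single-function mode
	 * on a two-port board with an MTU of 9000, the low threshold above
	 * is 96 + 9000/64 + 1 = 237 BRB blocks of 256 bytes each (roughly
	 * (24k + 4*MTU)/256), and high = 237 + 56 = 293 blocks (14k higher).
	 */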
4224
4225
4226 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4227
4228 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
4229 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
4230 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
4231 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
4232
4233 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4234 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4235 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4236 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
4237
4238 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4239 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4240
4241 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4242
4243	/* configure PBF to work without PAUSE for MTU 9000 */
4244 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4245
4246 /* update threshold */
4247 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4248 /* update init credit */
4249 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
4250
4251 /* probe changes */
4252 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4253 msleep(5);
4254 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
4255
4256#ifdef BCM_CNIC
4257 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
4258#endif
4259 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
4260 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
4261
4262 if (CHIP_IS_E1(bp)) {
4263 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4264 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4265 }
4266 bnx2x_init_block(bp, HC_BLOCK, init_stage);
4267
4268 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4269 /* init aeu_mask_attn_func_0/1:
4270 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4271 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4272 * bits 4-7 are used for "per vn group attention" */
4273 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4274 (IS_E1HMF(bp) ? 0xF7 : 0x7));
4275
4276 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4277 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
4278 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
4279 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
4280 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
4281
4282 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
4283
4284 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4285
4286 if (CHIP_IS_E1H(bp)) {
4287 /* 0x2 disable e1hov, 0x1 enable */
4288 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4289 (IS_E1HMF(bp) ? 0x1 : 0x2));
4290
4291 {
4292 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4293 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4294 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4295 }
4296 }
4297
4298 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4299 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
4300
4301 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4302 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4303 {
4304 u32 swap_val, swap_override, aeu_gpio_mask, offset;
4305
4306 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4307 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4308
4309 /* The GPIO should be swapped if the swap register is
4310 set and active */
4311 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4312 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4313
4314 /* Select function upon port-swap configuration */
4315 if (port == 0) {
4316 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4317 aeu_gpio_mask = (swap_val && swap_override) ?
4318 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4319 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4320 } else {
4321 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4322 aeu_gpio_mask = (swap_val && swap_override) ?
4323 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4324 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4325 }
4326 val = REG_RD(bp, offset);
4327 /* add GPIO3 to group */
4328 val |= aeu_gpio_mask;
4329 REG_WR(bp, offset, val);
4330 }
4331 break;
4332
4333 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4334 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4335 /* add SPIO 5 to group 0 */
4336 {
4337 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4338 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4339 val = REG_RD(bp, reg_addr);
4340 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4341 REG_WR(bp, reg_addr, val);
4342 }
4343 break;
4344
4345 default:
4346 break;
4347 }
4348
4349 bnx2x__link_reset(bp);
4350
4351 return 0;
4352}
4353
4354#define ILT_PER_FUNC (768/2)
4355#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4356/* the phys address is shifted right 12 bits and a 1=valid bit is
4357   added at bit position 53;
4358   then since this is a wide register(TM)
4359   we split it into two 32 bit writes
4360   */
4361#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4362#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4363#define PXP_ONE_ILT(x) (((x) << 10) | x)
4364#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
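/* Worked example (illustrative address only): for a DMA address of
 * 0x0000001234567000, ONCHIP_ADDR1() yields 0x01234567 (address bits 43:12)
 * and ONCHIP_ADDR2() yields 0x00100000 - the valid bit at position 20 of the
 * upper word, with address bits 63:44 (zero here) below it.
 */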
4365
4366#ifdef BCM_CNIC
4367#define CNIC_ILT_LINES 127
4368#define CNIC_CTX_PER_ILT 16
4369#else
4370#define CNIC_ILT_LINES 0
4371#endif
4372
4373static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4374{
4375 int reg;
4376
4377 if (CHIP_IS_E1H(bp))
4378 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4379 else /* E1 */
4380 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4381
4382 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4383}
4384
4385static int bnx2x_init_func(struct bnx2x *bp)
4386{
4387 int port = BP_PORT(bp);
4388 int func = BP_FUNC(bp);
4389 u32 addr, val;
4390 int i;
4391
4392 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
4393
4394 /* set MSI reconfigure capability */
4395 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4396 val = REG_RD(bp, addr);
4397 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4398 REG_WR(bp, addr, val);
4399
4400 i = FUNC_ILT_BASE(func);
4401
4402 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4403 if (CHIP_IS_E1H(bp)) {
4404 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4405 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4406 } else /* E1 */
4407 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4408 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4409
4410#ifdef BCM_CNIC
4411 i += 1 + CNIC_ILT_LINES;
4412 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4413 if (CHIP_IS_E1(bp))
4414 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4415 else {
4416 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4417 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4418 }
4419
4420 i++;
4421 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4422 if (CHIP_IS_E1(bp))
4423 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4424 else {
4425 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4426 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4427 }
4428
4429 i++;
4430 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4431 if (CHIP_IS_E1(bp))
4432 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4433 else {
4434 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4435 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4436 }
4437
4438 /* tell the searcher where the T2 table is */
4439 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4440
4441 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4442 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4443
4444 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4445 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4446 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4447
4448 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4449#endif
4450
4451 if (CHIP_IS_E1H(bp)) {
4452 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4453 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4454 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4455 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4456 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4457 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4458 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4459 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4460 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4461
4462 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4463 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4464 }
4465
4466 /* HC init per function */
4467 if (CHIP_IS_E1H(bp)) {
4468 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4469
4470 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4471 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4472 }
4473 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4474
4475 /* Reset PCIE errors for debug */
4476 REG_WR(bp, 0x2114, 0xffffffff);
4477 REG_WR(bp, 0x2120, 0xffffffff);
4478
4479 return 0;
4480}
4481
4482int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4483{
4484 int i, rc = 0;
4485
4486 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4487 BP_FUNC(bp), load_code);
4488
4489 bp->dmae_ready = 0;
4490 mutex_init(&bp->dmae_mutex);
4491 rc = bnx2x_gunzip_init(bp);
4492 if (rc)
4493 return rc;
4494
4495 switch (load_code) {
4496 case FW_MSG_CODE_DRV_LOAD_COMMON:
4497 rc = bnx2x_init_common(bp);
4498 if (rc)
4499 goto init_hw_err;
4500 /* no break */
4501
4502 case FW_MSG_CODE_DRV_LOAD_PORT:
4503 bp->dmae_ready = 1;
4504 rc = bnx2x_init_port(bp);
4505 if (rc)
4506 goto init_hw_err;
4507 /* no break */
4508
4509 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4510 bp->dmae_ready = 1;
4511 rc = bnx2x_init_func(bp);
4512 if (rc)
4513 goto init_hw_err;
4514 break;
4515
4516 default:
4517 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4518 break;
4519 }
4520
4521 if (!BP_NOMCP(bp)) {
4522 int func = BP_FUNC(bp);
4523
4524 bp->fw_drv_pulse_wr_seq =
4525 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4526 DRV_PULSE_SEQ_MASK);
4527 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4528 }
4529
4530 /* this needs to be done before gunzip end */
4531 bnx2x_zero_def_sb(bp);
4532 for_each_queue(bp, i)
4533 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4534#ifdef BCM_CNIC
4535 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4536#endif
4537
4538init_hw_err:
4539 bnx2x_gunzip_end(bp);
4540
4541 return rc;
4542}
4543
4544void bnx2x_free_mem(struct bnx2x *bp)
4545{
4546
4547#define BNX2X_PCI_FREE(x, y, size) \
4548 do { \
4549 if (x) { \
4550 dma_free_coherent(&bp->pdev->dev, size, x, y); \
4551 x = NULL; \
4552 y = 0; \
4553 } \
4554 } while (0)
4555
4556#define BNX2X_FREE(x) \
4557 do { \
4558 if (x) { \
4559 vfree(x); \
4560 x = NULL; \
4561 } \
4562 } while (0)
4563
4564 int i;
4565
4566 /* fastpath */
4567 /* Common */
4568 for_each_queue(bp, i) {
4569
4570 /* status blocks */
4571 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4572 bnx2x_fp(bp, i, status_blk_mapping),
4573 sizeof(struct host_status_block));
4574 }
4575 /* Rx */
4576 for_each_queue(bp, i) {
4577
4578 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4579 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4580 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4581 bnx2x_fp(bp, i, rx_desc_mapping),
4582 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4583
4584 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4585 bnx2x_fp(bp, i, rx_comp_mapping),
4586 sizeof(struct eth_fast_path_rx_cqe) *
4587 NUM_RCQ_BD);
4588
4589 /* SGE ring */
4590 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4591 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4592 bnx2x_fp(bp, i, rx_sge_mapping),
4593 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4594 }
4595 /* Tx */
4596 for_each_queue(bp, i) {
4597
4598 /* fastpath tx rings: tx_buf tx_desc */
4599 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4600 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4601 bnx2x_fp(bp, i, tx_desc_mapping),
4602 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4603 }
4604 /* end of fastpath */
4605
4606 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4607 sizeof(struct host_def_status_block));
4608
4609 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4610 sizeof(struct bnx2x_slowpath));
4611
4612#ifdef BCM_CNIC
4613 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4614 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4615 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4616 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4617 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4618 sizeof(struct host_status_block));
4619#endif
4620 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4621
4622#undef BNX2X_PCI_FREE
4623#undef BNX2X_FREE
4624}
4625
4626int bnx2x_alloc_mem(struct bnx2x *bp)
4627{
4628
4629#define BNX2X_PCI_ALLOC(x, y, size) \
4630 do { \
4631 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
4632 if (x == NULL) \
4633 goto alloc_mem_err; \
4634 memset(x, 0, size); \
4635 } while (0)
4636
4637#define BNX2X_ALLOC(x, size) \
4638 do { \
4639 x = vmalloc(size); \
4640 if (x == NULL) \
4641 goto alloc_mem_err; \
4642 memset(x, 0, size); \
4643 } while (0)
4644
4645 int i;
4646
4647 /* fastpath */
4648 /* Common */
4649 for_each_queue(bp, i) {
4650 bnx2x_fp(bp, i, bp) = bp;
4651
4652 /* status blocks */
4653 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4654 &bnx2x_fp(bp, i, status_blk_mapping),
4655 sizeof(struct host_status_block));
4656 }
4657 /* Rx */
4658 for_each_queue(bp, i) {
4659
4660 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4661 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4662 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4663 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4664 &bnx2x_fp(bp, i, rx_desc_mapping),
4665 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4666
4667 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4668 &bnx2x_fp(bp, i, rx_comp_mapping),
4669 sizeof(struct eth_fast_path_rx_cqe) *
4670 NUM_RCQ_BD);
4671
4672 /* SGE ring */
4673 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4674 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4675 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4676 &bnx2x_fp(bp, i, rx_sge_mapping),
4677 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4678 }
4679 /* Tx */
4680 for_each_queue(bp, i) {
4681
4682 /* fastpath tx rings: tx_buf tx_desc */
4683 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4684 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4685 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4686 &bnx2x_fp(bp, i, tx_desc_mapping),
4687 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4688 }
4689 /* end of fastpath */
4690
4691 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4692 sizeof(struct host_def_status_block));
4693
4694 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4695 sizeof(struct bnx2x_slowpath));
4696
4697#ifdef BCM_CNIC
4698 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4699
4700 /* allocate searcher T2 table
4701 we allocate 1/4 of alloc num for T2
4702 (which is not entered into the ILT) */
4703 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4704
4705 /* Initialize T2 (for 1024 connections) */
4706 for (i = 0; i < 16*1024; i += 64)
4707 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
4708
4709 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4710 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4711
4712 /* QM queues (128*MAX_CONN) */
4713 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4714
4715 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4716 sizeof(struct host_status_block));
4717#endif
4718
4719 /* Slow path ring */
4720 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4721
4722 return 0;
4723
4724alloc_mem_err:
4725 bnx2x_free_mem(bp);
4726 return -ENOMEM;
4727
4728#undef BNX2X_PCI_ALLOC
4729#undef BNX2X_ALLOC
4730}
4731
4732
4733/*
4734 * Init service functions
4735 */
4736
4737/**
4738 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4739 *
4740 * @param bp driver descriptor
4741 * @param set set or clear an entry (1 or 0)
4742 * @param mac pointer to a buffer containing a MAC
4743 * @param cl_bit_vec bit vector of clients to register a MAC for
4744 * @param cam_offset offset in a CAM to use
4745 * @param with_bcast set broadcast MAC as well
4746 */
4747static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4748 u32 cl_bit_vec, u8 cam_offset,
4749 u8 with_bcast)
4750{
4751 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
4752 int port = BP_PORT(bp);
4753
4754 /* CAM allocation
4755 * unicasts 0-31:port0 32-63:port1
4756 * multicast 64-127:port0 128-191:port1
4757 */
4758 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4759 config->hdr.offset = cam_offset;
4760 config->hdr.client_id = 0xff;
4761 config->hdr.reserved1 = 0;
4762
4763 /* primary MAC */
4764 config->config_table[0].cam_entry.msb_mac_addr =
4765 swab16(*(u16 *)&mac[0]);
4766 config->config_table[0].cam_entry.middle_mac_addr =
4767 swab16(*(u16 *)&mac[2]);
4768 config->config_table[0].cam_entry.lsb_mac_addr =
4769 swab16(*(u16 *)&mac[4]);
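	/* Illustrative note: on a little-endian host a MAC of
	 * 00:1a:2b:3c:4d:5e (example value) becomes msb=0x001a,
	 * middle=0x2b3c, lsb=0x4d5e, matching the %04x:%04x:%04x debug
	 * format used below.
	 */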
4770 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4771 if (set)
4772 config->config_table[0].target_table_entry.flags = 0;
4773 else
4774 CAM_INVALIDATE(config->config_table[0]);
4775 config->config_table[0].target_table_entry.clients_bit_vector =
4776 cpu_to_le32(cl_bit_vec);
4777 config->config_table[0].target_table_entry.vlan_id = 0;
4778
4779 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4780 (set ? "setting" : "clearing"),
4781 config->config_table[0].cam_entry.msb_mac_addr,
4782 config->config_table[0].cam_entry.middle_mac_addr,
4783 config->config_table[0].cam_entry.lsb_mac_addr);
4784
4785 /* broadcast */
4786 if (with_bcast) {
4787 config->config_table[1].cam_entry.msb_mac_addr =
4788 cpu_to_le16(0xffff);
4789 config->config_table[1].cam_entry.middle_mac_addr =
4790 cpu_to_le16(0xffff);
4791 config->config_table[1].cam_entry.lsb_mac_addr =
4792 cpu_to_le16(0xffff);
4793 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4794 if (set)
4795 config->config_table[1].target_table_entry.flags =
4796 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4797 else
4798 CAM_INVALIDATE(config->config_table[1]);
4799 config->config_table[1].target_table_entry.clients_bit_vector =
4800 cpu_to_le32(cl_bit_vec);
4801 config->config_table[1].target_table_entry.vlan_id = 0;
4802 }
4803
4804 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4805 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4806 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4807}
4808
4809/**
4810 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4811 *
4812 * @param bp driver descriptor
4813 * @param set set or clear an entry (1 or 0)
4814 * @param mac pointer to a buffer containing a MAC
4815 * @param cl_bit_vec bit vector of clients to register a MAC for
4816 * @param cam_offset offset in a CAM to use
4817 */
4818static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4819 u32 cl_bit_vec, u8 cam_offset)
4820{
4821 struct mac_configuration_cmd_e1h *config =
4822 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4823
4824 config->hdr.length = 1;
4825 config->hdr.offset = cam_offset;
4826 config->hdr.client_id = 0xff;
4827 config->hdr.reserved1 = 0;
4828
4829 /* primary MAC */
4830 config->config_table[0].msb_mac_addr =
4831 swab16(*(u16 *)&mac[0]);
4832 config->config_table[0].middle_mac_addr =
4833 swab16(*(u16 *)&mac[2]);
4834 config->config_table[0].lsb_mac_addr =
4835 swab16(*(u16 *)&mac[4]);
4836 config->config_table[0].clients_bit_vector =
4837 cpu_to_le32(cl_bit_vec);
4838 config->config_table[0].vlan_id = 0;
4839 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
4840 if (set)
4841 config->config_table[0].flags = BP_PORT(bp);
4842 else
4843 config->config_table[0].flags =
4844 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
4845
4846 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
4847 (set ? "setting" : "clearing"),
4848 config->config_table[0].msb_mac_addr,
4849 config->config_table[0].middle_mac_addr,
4850 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
4851
4852 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4853 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4854 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4855}
4856
4857static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4858 int *state_p, int poll)
4859{
4860 /* can take a while if any port is running */
4861 int cnt = 5000;
4862
4863 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4864 poll ? "polling" : "waiting", state, idx);
4865
4866 might_sleep();
4867 while (cnt--) {
4868 if (poll) {
4869 bnx2x_rx_int(bp->fp, 10);
4870 /* if index is different from 0
4871 * the reply for some commands will
4872 * be on the non default queue
4873 */
4874 if (idx)
4875 bnx2x_rx_int(&bp->fp[idx], 10);
4876 }
4877
4878 mb(); /* state is changed by bnx2x_sp_event() */
4879 if (*state_p == state) {
4880#ifdef BNX2X_STOP_ON_ERROR
4881 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
4882#endif
4883 return 0;
4884 }
4885
4886 msleep(1);
4887
4888 if (bp->panic)
4889 return -EIO;
4890 }
4891
4892 /* timeout! */
4893 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4894 poll ? "polling" : "waiting", state, idx);
4895#ifdef BNX2X_STOP_ON_ERROR
4896 bnx2x_panic();
4897#endif
4898
4899 return -EBUSY;
4900}
4901
4902void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
4903{
4904 bp->set_mac_pending++;
4905 smp_wmb();
4906
4907 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4908 (1 << bp->fp->cl_id), BP_FUNC(bp));
4909
4910 /* Wait for a completion */
4911 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4912}
4913
4914void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4915{
4916 bp->set_mac_pending++;
4917 smp_wmb();
4918
4919 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4920 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4921 1);
4922
4923 /* Wait for a completion */
4924 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4925}
4926
4927#ifdef BCM_CNIC
4928/**
4929 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
4930 * MAC(s). This function will wait until the ramrod completion
4931 * returns.
4932 *
4933 * @param bp driver handle
4934 * @param set set or clear the CAM entry
4935 *
4936 * @return 0 on success, -ENODEV if the ramrod doesn't return.
4937 */
4938int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4939{
4940 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4941
4942 bp->set_mac_pending++;
4943 smp_wmb();
4944
4945 /* Send a SET_MAC ramrod */
4946 if (CHIP_IS_E1(bp))
4947 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4948 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4949 1);
4950 else
4951 /* CAM allocation for E1H
4952 * unicasts: by func number
4953 * multicast: 20+FUNC*20, 20 each
4954 */
4955 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4956 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4957
4958 /* Wait for a completion when setting */
4959 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4960
4961 return 0;
4962}
4963#endif
4964
4965int bnx2x_setup_leading(struct bnx2x *bp)
4966{
4967 int rc;
4968
4969 /* reset IGU state */
4970 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4971
4972 /* SETUP ramrod */
4973 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4974
4975 /* Wait for completion */
4976 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
4977
4978 return rc;
4979}
4980
4981int bnx2x_setup_multi(struct bnx2x *bp, int index)
4982{
4983 struct bnx2x_fastpath *fp = &bp->fp[index];
4984
4985 /* reset IGU state */
4986 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4987
4988 /* SETUP ramrod */
4989 fp->state = BNX2X_FP_STATE_OPENING;
4990 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4991 fp->cl_id, 0);
4992
4993 /* Wait for completion */
4994 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
4995 &(fp->state), 0);
4996}
4997
4998
4999void bnx2x_set_num_queues_msix(struct bnx2x *bp)
5000{
5001
5002 switch (bp->multi_mode) {
5003 case ETH_RSS_MODE_DISABLED:
5004 bp->num_queues = 1;
5005 break;
5006
5007 case ETH_RSS_MODE_REGULAR:
5008 if (num_queues)
5009 bp->num_queues = min_t(u32, num_queues,
5010 BNX2X_MAX_QUEUES(bp));
5011 else
5012 bp->num_queues = min_t(u32, num_online_cpus(),
5013 BNX2X_MAX_QUEUES(bp));
5014 break;
5015
5017 default:
5018 bp->num_queues = 1;
5019 break;
5020 }
5021}
5022
5023
5024
5025static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5026{
5027 struct bnx2x_fastpath *fp = &bp->fp[index];
5028 int rc;
5029
5030 /* halt the connection */
5031 fp->state = BNX2X_FP_STATE_HALTING;
5032 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5033
5034 /* Wait for completion */
5035 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
5036 &(fp->state), 1);
5037 if (rc) /* timeout */
5038 return rc;
5039
5040 /* delete cfc entry */
5041 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5042
5043 /* Wait for completion */
5044 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
5045 &(fp->state), 1);
5046 return rc;
5047}
5048
5049static int bnx2x_stop_leading(struct bnx2x *bp)
5050{
5051 __le16 dsb_sp_prod_idx;
5052 /* if the other port is handling traffic,
5053 this can take a lot of time */
5054 int cnt = 500;
5055 int rc;
5056
5057 might_sleep();
5058
5059 /* Send HALT ramrod */
5060 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
5061 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
5062
5063 /* Wait for completion */
5064 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5065 &(bp->fp[0].state), 1);
5066 if (rc) /* timeout */
5067 return rc;
5068
5069 dsb_sp_prod_idx = *bp->dsb_sp_prod;
5070
5071 /* Send PORT_DELETE ramrod */
5072 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5073
5074 /* Wait for completion to arrive on default status block
5075 we are going to reset the chip anyway
5076 so there is not much to do if this times out
5077 */
5078 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
5079 if (!cnt) {
5080 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5081 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5082 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5083#ifdef BNX2X_STOP_ON_ERROR
5084 bnx2x_panic();
5085#endif
5086 rc = -EBUSY;
5087 break;
5088 }
5089 cnt--;
5090 msleep(1);
5091 rmb(); /* Refresh the dsb_sp_prod */
5092 }
5093 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5094 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
5095
5096 return rc;
5097}
5098
5099static void bnx2x_reset_func(struct bnx2x *bp)
5100{
5101 int port = BP_PORT(bp);
5102 int func = BP_FUNC(bp);
5103 int base, i;
5104
5105 /* Configure IGU */
5106 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5107 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5108
5109#ifdef BCM_CNIC
5110 /* Disable Timer scan */
5111 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5112 /*
5113	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
5114 * complete
5115 */
5116 for (i = 0; i < 200; i++) {
5117 msleep(10);
5118 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5119 break;
5120 }
5121#endif
5122 /* Clear ILT */
5123 base = FUNC_ILT_BASE(func);
5124 for (i = base; i < base + ILT_PER_FUNC; i++)
5125 bnx2x_ilt_wr(bp, i, 0);
5126}
5127
5128static void bnx2x_reset_port(struct bnx2x *bp)
5129{
5130 int port = BP_PORT(bp);
5131 u32 val;
5132
5133 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5134
5135 /* Do not rcv packets to BRB */
5136 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5137 /* Do not direct rcv packets that are not for MCP to the BRB */
5138 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5139 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5140
5141 /* Configure AEU */
5142 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5143
5144 msleep(100);
5145 /* Check for BRB port occupancy */
5146 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5147 if (val)
5148 DP(NETIF_MSG_IFDOWN,
5149 "BRB1 is not empty %d blocks are occupied\n", val);
5150
5151 /* TODO: Close Doorbell port? */
5152}
5153
5154static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5155{
5156 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5157 BP_FUNC(bp), reset_code);
5158
5159 switch (reset_code) {
5160 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5161 bnx2x_reset_port(bp);
5162 bnx2x_reset_func(bp);
5163 bnx2x_reset_common(bp);
5164 break;
5165
5166 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5167 bnx2x_reset_port(bp);
5168 bnx2x_reset_func(bp);
5169 break;
5170
5171 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5172 bnx2x_reset_func(bp);
5173 break;
5174
5175 default:
5176 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5177 break;
5178 }
5179}
5180
5181void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5182{
5183 int port = BP_PORT(bp);
5184 u32 reset_code = 0;
5185 int i, cnt, rc;
5186
5187 /* Wait until tx fastpath tasks complete */
5188 for_each_queue(bp, i) {
5189 struct bnx2x_fastpath *fp = &bp->fp[i];
5190
5191 cnt = 1000;
5192 while (bnx2x_has_tx_work_unload(fp)) {
5193
5194 bnx2x_tx_int(fp);
5195 if (!cnt) {
5196 BNX2X_ERR("timeout waiting for queue[%d]\n",
5197 i);
5198#ifdef BNX2X_STOP_ON_ERROR
5199 bnx2x_panic();
5200				return;
5201#else
5202 break;
5203#endif
5204 }
5205 cnt--;
5206 msleep(1);
5207 }
5208 }
5209 /* Give HW time to discard old tx messages */
5210 msleep(1);
5211
5212 if (CHIP_IS_E1(bp)) {
5213 struct mac_configuration_cmd *config =
5214 bnx2x_sp(bp, mcast_config);
5215
5216 bnx2x_set_eth_mac_addr_e1(bp, 0);
5217
5218 for (i = 0; i < config->hdr.length; i++)
5219 CAM_INVALIDATE(config->config_table[i]);
5220
5221 config->hdr.length = i;
5222 if (CHIP_REV_IS_SLOW(bp))
5223 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5224 else
5225 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5226 config->hdr.client_id = bp->fp->cl_id;
5227 config->hdr.reserved1 = 0;
5228
5229 bp->set_mac_pending++;
5230 smp_wmb();
5231
5232 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5233 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5234 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5235
5236 } else { /* E1H */
5237 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5238
5239 bnx2x_set_eth_mac_addr_e1h(bp, 0);
5240
5241 for (i = 0; i < MC_HASH_SIZE; i++)
5242 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5243
5244 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5245 }
5246#ifdef BCM_CNIC
5247 /* Clear iSCSI L2 MAC */
5248 mutex_lock(&bp->cnic_mutex);
5249 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5250 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5251 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5252 }
5253 mutex_unlock(&bp->cnic_mutex);
5254#endif
5255
5256 if (unload_mode == UNLOAD_NORMAL)
5257 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5258
5259 else if (bp->flags & NO_WOL_FLAG)
5260 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
5261
5262 else if (bp->wol) {
5263 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5264 u8 *mac_addr = bp->dev->dev_addr;
5265 u32 val;
5266 /* The mac address is written to entries 1-4 to
5267 preserve entry 0 which is used by the PMF */
5268 u8 entry = (BP_E1HVN(bp) + 1)*8;
5269
5270 val = (mac_addr[0] << 8) | mac_addr[1];
5271 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5272
5273 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5274 (mac_addr[4] << 8) | mac_addr[5];
5275 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5276
5277 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5278
5279 } else
5280 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5281
5282 /* Close multi and leading connections
5283 Completions for ramrods are collected in a synchronous way */
5284 for_each_nondefault_queue(bp, i)
5285 if (bnx2x_stop_multi(bp, i))
5286 goto unload_error;
5287
5288 rc = bnx2x_stop_leading(bp);
5289 if (rc) {
5290 BNX2X_ERR("Stop leading failed!\n");
5291#ifdef BNX2X_STOP_ON_ERROR
5292		return;
5293#else
5294 goto unload_error;
5295#endif
5296 }
5297
5298unload_error:
5299 if (!BP_NOMCP(bp))
5300 reset_code = bnx2x_fw_command(bp, reset_code);
5301 else {
5302 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
5303 load_count[0], load_count[1], load_count[2]);
5304 load_count[0]--;
5305 load_count[1 + port]--;
5306 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
5307 load_count[0], load_count[1], load_count[2]);
5308 if (load_count[0] == 0)
5309 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5310 else if (load_count[1 + port] == 0)
5311 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5312 else
5313 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5314 }
5315
5316 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5317 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5318 bnx2x__link_reset(bp);
5319
5320 /* Reset the chip */
5321 bnx2x_reset_chip(bp, reset_code);
5322
5323 /* Report UNLOAD_DONE to MCP */
5324 if (!BP_NOMCP(bp))
5325 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5326
5327}
5328
5329void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5330{
5331 u32 val;
5332
5333 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5334
5335 if (CHIP_IS_E1(bp)) {
5336 int port = BP_PORT(bp);
5337 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5338 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5339
5340 val = REG_RD(bp, addr);
5341 val &= ~(0x300);
5342 REG_WR(bp, addr, val);
5343 } else if (CHIP_IS_E1H(bp)) {
5344 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5345 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5346 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5347 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5348 }
5349}
5350
5351
5352/* Close gates #2, #3 and #4: */
5353static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5354{
5355 u32 val, addr;
5356
5357 /* Gates #2 and #4a are closed/opened for "not E1" only */
5358 if (!CHIP_IS_E1(bp)) {
5359 /* #4 */
5360 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5361 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5362 close ? (val | 0x1) : (val & (~(u32)1)));
5363 /* #2 */
5364 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5365 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5366 close ? (val | 0x1) : (val & (~(u32)1)));
5367 }
5368
5369 /* #3 */
5370 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5371 val = REG_RD(bp, addr);
5372 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5373
5374 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5375 close ? "closing" : "opening");
5376 mmiowb();
5377}
5378
5379#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
5380
5381static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5382{
5383 /* Do some magic... */
5384 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5385 *magic_val = val & SHARED_MF_CLP_MAGIC;
5386 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5387}
5388
5389/* Restore the value of the `magic' bit.
5390 *
5391 * @param bp Driver handle.
5392 * @param magic_val Old value of the `magic' bit.
5393 */
5394static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5395{
5396 /* Restore the `magic' bit value... */
5397 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5398 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5399 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
5400 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5401 MF_CFG_WR(bp, shared_mf_config.clp_mb,
5402 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5403}
5404
5405/* Prepares for MCP reset: takes care of CLP configurations.
5406 *
5407 * @param bp
5408 * @param magic_val Old value of 'magic' bit.
5409 */
5410static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5411{
5412 u32 shmem;
5413 u32 validity_offset;
5414
5415 DP(NETIF_MSG_HW, "Starting\n");
5416
5417 /* Set `magic' bit in order to save MF config */
5418 if (!CHIP_IS_E1(bp))
5419 bnx2x_clp_reset_prep(bp, magic_val);
5420
5421 /* Get shmem offset */
5422 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5423 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5424
5425 /* Clear validity map flags */
5426 if (shmem > 0)
5427 REG_WR(bp, shmem + validity_offset, 0);
5428}
5429
5430#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
5431#define MCP_ONE_TIMEOUT 100 /* 100 ms */
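/* Illustrative note: with these values the wait loop in
 * bnx2x_reset_mcp_comp() below runs 5000/100 = 50 iterations, i.e. up to
 * roughly 5 seconds on real silicon or ~50 seconds on emulation/FPGA, where
 * bnx2x_mcp_wait_one() sleeps ten times longer per iteration.
 */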
5432
5433/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5434 * depending on the HW type.
5435 *
5436 * @param bp
5437 */
5438static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5439{
5440 /* special handling for emulation and FPGA,
5441 wait 10 times longer */
5442 if (CHIP_REV_IS_SLOW(bp))
5443 msleep(MCP_ONE_TIMEOUT*10);
5444 else
5445 msleep(MCP_ONE_TIMEOUT);
5446}
5447
5448static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5449{
5450 u32 shmem, cnt, validity_offset, val;
5451 int rc = 0;
5452
5453 msleep(100);
5454
5455 /* Get shmem offset */
5456 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5457 if (shmem == 0) {
5458 BNX2X_ERR("Shmem 0 return failure\n");
5459 rc = -ENOTTY;
5460 goto exit_lbl;
5461 }
5462
5463 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5464
5465 /* Wait for MCP to come up */
5466 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5467		/* TBD: it's best to check the validity map of the last port.
5468 * currently checks on port 0.
5469 */
5470 val = REG_RD(bp, shmem + validity_offset);
5471 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5472 shmem + validity_offset, val);
5473
5474 /* check that shared memory is valid. */
5475 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5476 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5477 break;
5478
5479 bnx2x_mcp_wait_one(bp);
5480 }
5481
5482 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5483
5484 /* Check that shared memory is valid. This indicates that MCP is up. */
5485 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5486 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5487 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5488 rc = -ENOTTY;
5489 goto exit_lbl;
5490 }
5491
5492exit_lbl:
5493 /* Restore the `magic' bit value */
5494 if (!CHIP_IS_E1(bp))
5495 bnx2x_clp_reset_done(bp, magic_val);
5496
5497 return rc;
5498}
5499
5500static void bnx2x_pxp_prep(struct bnx2x *bp)
5501{
5502 if (!CHIP_IS_E1(bp)) {
5503 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5504 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5505 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5506 mmiowb();
5507 }
5508}
5509
5510/*
5511 * Reset the whole chip except for:
5512 * - PCIE core
5513 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5514 * one reset bit)
5515 * - IGU
5516 * - MISC (including AEU)
5517 * - GRC
5518 * - RBCN, RBCP
5519 */
5520static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5521{
5522 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5523
5524 not_reset_mask1 =
5525 MISC_REGISTERS_RESET_REG_1_RST_HC |
5526 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5527 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5528
5529 not_reset_mask2 =
5530 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5531 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5532 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5533 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5534 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5535 MISC_REGISTERS_RESET_REG_2_RST_GRC |
5536 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5537 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5538
5539 reset_mask1 = 0xffffffff;
5540
5541 if (CHIP_IS_E1(bp))
5542 reset_mask2 = 0xffff;
5543 else
5544 reset_mask2 = 0x1ffff;
5545
5546 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5547 reset_mask1 & (~not_reset_mask1));
5548 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5549 reset_mask2 & (~not_reset_mask2));
5550
5551 barrier();
5552 mmiowb();
5553
5554 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5555 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5556 mmiowb();
5557}
5558
5559static int bnx2x_process_kill(struct bnx2x *bp)
5560{
5561 int cnt = 1000;
5562 u32 val = 0;
5563 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5564
5565
5566 /* Empty the Tetris buffer, wait for 1s */
5567 do {
5568 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5569 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5570 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5571 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5572 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5573 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5574 ((port_is_idle_0 & 0x1) == 0x1) &&
5575 ((port_is_idle_1 & 0x1) == 0x1) &&
5576 (pgl_exp_rom2 == 0xffffffff))
5577 break;
5578 msleep(1);
5579 } while (cnt-- > 0);
5580
5581 if (cnt <= 0) {
5582 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5583 " are still"
5584 " outstanding read requests after 1s!\n");
5585 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5586 " port_is_idle_0=0x%08x,"
5587 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5588 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5589 pgl_exp_rom2);
5590 return -EAGAIN;
5591 }
5592
5593 barrier();
5594
5595 /* Close gates #2, #3 and #4 */
5596 bnx2x_set_234_gates(bp, true);
5597
5598 /* TBD: Indicate that "process kill" is in progress to MCP */
5599
5600 /* Clear "unprepared" bit */
5601 REG_WR(bp, MISC_REG_UNPREPARED, 0);
5602 barrier();
5603
5604 /* Make sure all is written to the chip before the reset */
5605 mmiowb();
5606
5607 /* Wait for 1ms to empty GLUE and PCI-E core queues,
5608 * PSWHST, GRC and PSWRD Tetris buffer.
5609 */
5610 msleep(1);
5611
5612	/* Prepare for chip reset: */
5613 /* MCP */
5614 bnx2x_reset_mcp_prep(bp, &val);
5615
5616 /* PXP */
5617 bnx2x_pxp_prep(bp);
5618 barrier();
5619
5620 /* reset the chip */
5621 bnx2x_process_kill_chip_reset(bp);
5622 barrier();
5623
5624 /* Recover after reset: */
5625 /* MCP */
5626 if (bnx2x_reset_mcp_comp(bp, val))
5627 return -EAGAIN;
5628
5629 /* PXP */
5630 bnx2x_pxp_prep(bp);
5631
5632 /* Open the gates #2, #3 and #4 */
5633 bnx2x_set_234_gates(bp, false);
5634
5635 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
5636 * reset state, re-enable attentions. */
5637
5638 return 0;
5639}
5640
5641static int bnx2x_leader_reset(struct bnx2x *bp)
5642{
5643 int rc = 0;
5644 /* Try to recover after the failure */
5645 if (bnx2x_process_kill(bp)) {
5646		printk(KERN_ERR "%s: Something bad happened! Aii!\n",
5647 bp->dev->name);
5648 rc = -EAGAIN;
5649 goto exit_leader_reset;
5650 }
5651
5652 /* Clear "reset is in progress" bit and update the driver state */
5653 bnx2x_set_reset_done(bp);
5654 bp->recovery_state = BNX2X_RECOVERY_DONE;
5655
5656exit_leader_reset:
5657 bp->is_leader = 0;
5658 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5659 smp_wmb();
5660 return rc;
5661}
5662
5663/* Assumption: runs under rtnl lock. This together with the fact
5664 * that it's called only from bnx2x_reset_task() ensures that it
5665 * will never be called when netif_running(bp->dev) is false.
5666 */
5667static void bnx2x_parity_recover(struct bnx2x *bp)
5668{
5669 DP(NETIF_MSG_HW, "Handling parity\n");
5670 while (1) {
5671 switch (bp->recovery_state) {
5672 case BNX2X_RECOVERY_INIT:
5673 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5674 /* Try to get a LEADER_LOCK HW lock */
5675 if (bnx2x_trylock_hw_lock(bp,
5676 HW_LOCK_RESOURCE_RESERVED_08))
5677 bp->is_leader = 1;
5678
5679 /* Stop the driver */
5680 /* If interface has been removed - break */
5681 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5682 return;
5683
5684 bp->recovery_state = BNX2X_RECOVERY_WAIT;
5685 /* Ensure "is_leader" and "recovery_state"
5686 * update values are seen on other CPUs
5687 */
5688 smp_wmb();
5689 break;
5690
5691 case BNX2X_RECOVERY_WAIT:
5692 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5693 if (bp->is_leader) {
5694 u32 load_counter = bnx2x_get_load_cnt(bp);
5695 if (load_counter) {
5696 /* Wait until all other functions get
5697 * down.
5698 */
5699 schedule_delayed_work(&bp->reset_task,
5700 HZ/10);
5701 return;
5702 } else {
5703 /* If all other functions got down -
5704 * try to bring the chip back to
5705 * normal. In any case it's an exit
5706 * point for a leader.
5707 */
5708 if (bnx2x_leader_reset(bp) ||
5709 bnx2x_nic_load(bp, LOAD_NORMAL)) {
5710 printk(KERN_ERR"%s: Recovery "
5711 "has failed. Power cycle is "
5712 "needed.\n", bp->dev->name);
5713 /* Disconnect this device */
5714 netif_device_detach(bp->dev);
 5715 					/* Block ifup for all functions
 5716 					 * of this ASIC until
 5717 					 * "process kill" or power
 5718 					 * cycle.
 5719 					 */
5720 bnx2x_set_reset_in_progress(bp);
5721 /* Shut down the power */
5722 bnx2x_set_power_state(bp,
5723 PCI_D3hot);
5724 return;
5725 }
5726
5727 return;
5728 }
5729 } else { /* non-leader */
5730 if (!bnx2x_reset_is_done(bp)) {
 5731 				/* Try to get the LEADER_LOCK HW lock,
 5732 				 * since a former leader may have
 5733 				 * been unloaded by the user or may
 5734 				 * have released leadership for some
 5735 				 * other reason.
 5736 				 */
5737 if (bnx2x_trylock_hw_lock(bp,
5738 HW_LOCK_RESOURCE_RESERVED_08)) {
5739 /* I'm a leader now! Restart a
5740 * switch case.
5741 */
5742 bp->is_leader = 1;
5743 break;
5744 }
5745
5746 schedule_delayed_work(&bp->reset_task,
5747 HZ/10);
5748 return;
5749
5750 } else { /* A leader has completed
5751 * the "process kill". It's an exit
5752 * point for a non-leader.
5753 */
5754 bnx2x_nic_load(bp, LOAD_NORMAL);
5755 bp->recovery_state =
5756 BNX2X_RECOVERY_DONE;
5757 smp_wmb();
5758 return;
5759 }
5760 }
5761 default:
5762 return;
5763 }
5764 }
5765}
5766
 5767/* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
 5768 * scheduled on the generic system workqueue in order to prevent a deadlock.
 5769 */
5770static void bnx2x_reset_task(struct work_struct *work)
5771{
5772 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
5773
5774#ifdef BNX2X_STOP_ON_ERROR
5775 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5776 " so reset not done to allow debug dump,\n"
 5777 		  " you will need to reboot when done\n");
5778 return;
5779#endif
5780
5781 rtnl_lock();
5782
5783 if (!netif_running(bp->dev))
5784 goto reset_task_exit;
5785
5786 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5787 bnx2x_parity_recover(bp);
5788 else {
5789 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5790 bnx2x_nic_load(bp, LOAD_NORMAL);
5791 }
5792
5793reset_task_exit:
5794 rtnl_unlock();
5795}
5796
5797/* end of nic load/unload */
5798
5799/*
5800 * Init service functions
5801 */
5802
5803static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5804{
5805 switch (func) {
5806 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5807 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5808 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5809 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5810 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5811 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5812 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5813 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5814 default:
5815 BNX2X_ERR("Unsupported function index: %d\n", func);
5816 return (u32)(-1);
5817 }
5818}
5819
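/* The PGL "pretend" register makes the chip treat this function's GRC
 * accesses as if they were issued by the function number written to it.
 * The helper below pretends to be function 0 ("like-E1" mode), disables
 * interrupts, and then restores the original function.
 */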
5820static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5821{
5822 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5823
5824 /* Flush all outstanding writes */
5825 mmiowb();
5826
5827 /* Pretend to be function 0 */
5828 REG_WR(bp, reg, 0);
5829 /* Flush the GRC transaction (in the chip) */
5830 new_val = REG_RD(bp, reg);
5831 if (new_val != 0) {
5832 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5833 new_val);
5834 BUG();
5835 }
5836
 5837 	/* From now on we are in "like-E1" mode */
5838 bnx2x_int_disable(bp);
5839
5840 /* Flush all outstanding writes */
5841 mmiowb();
5842
 5843 	/* Restore the original function settings */
5844 REG_WR(bp, reg, orig_func);
5845 new_val = REG_RD(bp, reg);
5846 if (new_val != orig_func) {
5847 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5848 orig_func, new_val);
5849 BUG();
5850 }
5851}
5852
5853static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5854{
5855 if (CHIP_IS_E1H(bp))
5856 bnx2x_undi_int_disable_e1h(bp, func);
5857 else
5858 bnx2x_int_disable(bp);
5859}
5860
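/* Detect a pre-boot (UNDI) driver that was left active, ask the MCP to
 * unload it on both ports if necessary, quiesce RX traffic, and then reset
 * the chip while preserving the NIG port-swap configuration.
 */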
5861static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5862{
5863 u32 val;
5864
5865 /* Check if there is any driver already loaded */
5866 val = REG_RD(bp, MISC_REG_UNPREPARED);
5867 if (val == 0x1) {
 5868 		/* Check whether it is the UNDI driver:
 5869 		 * UNDI initializes the CID offset for the normal doorbell to 0x7
 5870 		 */
5871 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5872 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5873 if (val == 0x7) {
5874 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5875 /* save our func */
5876 int func = BP_FUNC(bp);
5877 u32 swap_en;
5878 u32 swap_val;
5879
5880 /* clear the UNDI indication */
5881 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5882
5883 BNX2X_DEV_INFO("UNDI is active! reset device\n");
5884
 5885 			/* try to unload UNDI on port 0 */
5886 bp->func = 0;
5887 bp->fw_seq =
5888 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5889 DRV_MSG_SEQ_NUMBER_MASK);
5890 reset_code = bnx2x_fw_command(bp, reset_code);
5891
5892 /* if UNDI is loaded on the other port */
5893 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5894
5895 /* send "DONE" for previous unload */
5896 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5897
5898 /* unload UNDI on port 1 */
5899 bp->func = 1;
5900 bp->fw_seq =
5901 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5902 DRV_MSG_SEQ_NUMBER_MASK);
5903 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5904
5905 bnx2x_fw_command(bp, reset_code);
5906 }
5907
5908 /* now it's safe to release the lock */
5909 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5910
5911 bnx2x_undi_int_disable(bp, func);
5912
5913 /* close input traffic and wait for it */
5914 /* Do not rcv packets to BRB */
5915 REG_WR(bp,
5916 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5917 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5918 /* Do not direct rcv packets that are not for MCP to
5919 * the BRB */
5920 REG_WR(bp,
5921 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5922 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5923 /* clear AEU */
5924 REG_WR(bp,
5925 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5926 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5927 msleep(10);
5928
5929 /* save NIG port swap info */
5930 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5931 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5932 /* reset device */
5933 REG_WR(bp,
5934 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5935 0xd3ffffff);
5936 REG_WR(bp,
5937 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5938 0x1403);
5939 /* take the NIG out of reset and restore swap values */
5940 REG_WR(bp,
5941 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5942 MISC_REGISTERS_RESET_REG_1_RST_NIG);
5943 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5944 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5945
5946 /* send unload done to the MCP */
5947 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5948
5949 /* restore our func and fw_seq */
5950 bp->func = func;
5951 bp->fw_seq =
5952 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5953 DRV_MSG_SEQ_NUMBER_MASK);
5954
5955 } else
5956 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5957 }
5958}
5959
5960static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5961{
5962 u32 val, val2, val3, val4, id;
5963 u16 pmc;
5964
5965 /* Get the chip revision id and number. */
5966 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5967 val = REG_RD(bp, MISC_REG_CHIP_NUM);
5968 id = ((val & 0xffff) << 16);
5969 val = REG_RD(bp, MISC_REG_CHIP_REV);
5970 id |= ((val & 0xf) << 12);
5971 val = REG_RD(bp, MISC_REG_CHIP_METAL);
5972 id |= ((val & 0xff) << 4);
5973 val = REG_RD(bp, MISC_REG_BOND_ID);
5974 id |= (val & 0xf);
5975 bp->common.chip_id = id;
5976 bp->link_params.chip_id = bp->common.chip_id;
5977 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
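	/* Example decode (hypothetical value): a chip_id of 0x164e0001 would
	 * mean chip num 0x164e, rev 0x0, metal 0x00 and bond_id 0x1, per the
	 * field layout noted above.
	 */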
5978
5979 val = (REG_RD(bp, 0x2874) & 0x55);
5980 if ((bp->common.chip_id & 0x1) ||
5981 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5982 bp->flags |= ONE_PORT_FLAG;
5983 BNX2X_DEV_INFO("single port device\n");
5984 }
5985
5986 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5987 bp->common.flash_size = (NVRAM_1MB_SIZE <<
5988 (val & MCPR_NVM_CFG4_FLASH_SIZE));
5989 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5990 bp->common.flash_size, bp->common.flash_size);
5991
5992 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5993 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
5994 bp->link_params.shmem_base = bp->common.shmem_base;
5995 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
5996 bp->common.shmem_base, bp->common.shmem2_base);
5997
5998 if (!bp->common.shmem_base ||
5999 (bp->common.shmem_base < 0xA0000) ||
6000 (bp->common.shmem_base >= 0xC0000)) {
6001 BNX2X_DEV_INFO("MCP not active\n");
6002 bp->flags |= NO_MCP_FLAG;
6003 return;
6004 }
6005
6006 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6007 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6008 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6009 BNX2X_ERROR("BAD MCP validity signature\n");
6010
6011 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6012 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
6013
6014 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6015 SHARED_HW_CFG_LED_MODE_MASK) >>
6016 SHARED_HW_CFG_LED_MODE_SHIFT);
6017
6018 bp->link_params.feature_config_flags = 0;
6019 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
6020 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6021 bp->link_params.feature_config_flags |=
6022 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6023 else
6024 bp->link_params.feature_config_flags &=
6025 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6026
6027 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6028 bp->common.bc_ver = val;
6029 BNX2X_DEV_INFO("bc_ver %X\n", val);
6030 if (val < BNX2X_BC_VER) {
 6031 		/* for now only warn;
 6032 		 * later we might need to enforce this */
6033 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6034 "please upgrade BC\n", BNX2X_BC_VER, val);
6035 }
6036 bp->link_params.feature_config_flags |=
6037 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
6038 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6039
6040 if (BP_E1HVN(bp) == 0) {
6041 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6042 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6043 } else {
6044 /* no WOL capability for E1HVN != 0 */
6045 bp->flags |= NO_WOL_FLAG;
6046 }
6047 BNX2X_DEV_INFO("%sWoL capable\n",
6048 (bp->flags & NO_WOL_FLAG) ? "not " : "");
6049
6050 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6051 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6052 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6053 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6054
6055 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6056 val, val2, val3, val4);
6057}
6058
6059static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6060 u32 switch_cfg)
6061{
6062 int port = BP_PORT(bp);
6063 u32 ext_phy_type;
6064
6065 switch (switch_cfg) {
6066 case SWITCH_CFG_1G:
6067 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6068
6069 ext_phy_type =
6070 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6071 switch (ext_phy_type) {
6072 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6073 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6074 ext_phy_type);
6075
6076 bp->port.supported |= (SUPPORTED_10baseT_Half |
6077 SUPPORTED_10baseT_Full |
6078 SUPPORTED_100baseT_Half |
6079 SUPPORTED_100baseT_Full |
6080 SUPPORTED_1000baseT_Full |
6081 SUPPORTED_2500baseX_Full |
6082 SUPPORTED_TP |
6083 SUPPORTED_FIBRE |
6084 SUPPORTED_Autoneg |
6085 SUPPORTED_Pause |
6086 SUPPORTED_Asym_Pause);
6087 break;
6088
6089 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6090 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6091 ext_phy_type);
6092
6093 bp->port.supported |= (SUPPORTED_10baseT_Half |
6094 SUPPORTED_10baseT_Full |
6095 SUPPORTED_100baseT_Half |
6096 SUPPORTED_100baseT_Full |
6097 SUPPORTED_1000baseT_Full |
6098 SUPPORTED_TP |
6099 SUPPORTED_FIBRE |
6100 SUPPORTED_Autoneg |
6101 SUPPORTED_Pause |
6102 SUPPORTED_Asym_Pause);
6103 break;
6104
6105 default:
6106 BNX2X_ERR("NVRAM config error. "
6107 "BAD SerDes ext_phy_config 0x%x\n",
6108 bp->link_params.ext_phy_config);
6109 return;
6110 }
6111
6112 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6113 port*0x10);
6114 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6115 break;
6116
6117 case SWITCH_CFG_10G:
6118 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6119
6120 ext_phy_type =
6121 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6122 switch (ext_phy_type) {
6123 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6124 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6125 ext_phy_type);
6126
6127 bp->port.supported |= (SUPPORTED_10baseT_Half |
6128 SUPPORTED_10baseT_Full |
6129 SUPPORTED_100baseT_Half |
6130 SUPPORTED_100baseT_Full |
6131 SUPPORTED_1000baseT_Full |
6132 SUPPORTED_2500baseX_Full |
6133 SUPPORTED_10000baseT_Full |
6134 SUPPORTED_TP |
6135 SUPPORTED_FIBRE |
6136 SUPPORTED_Autoneg |
6137 SUPPORTED_Pause |
6138 SUPPORTED_Asym_Pause);
6139 break;
6140
6141 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6142 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6143 ext_phy_type);
6144
6145 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6146 SUPPORTED_1000baseT_Full |
6147 SUPPORTED_FIBRE |
6148 SUPPORTED_Autoneg |
6149 SUPPORTED_Pause |
6150 SUPPORTED_Asym_Pause);
6151 break;
6152
6153 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6154 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6155 ext_phy_type);
6156
6157 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6158 SUPPORTED_2500baseX_Full |
6159 SUPPORTED_1000baseT_Full |
6160 SUPPORTED_FIBRE |
6161 SUPPORTED_Autoneg |
6162 SUPPORTED_Pause |
6163 SUPPORTED_Asym_Pause);
6164 break;
6165
6166 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6167 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6168 ext_phy_type);
6169
6170 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6171 SUPPORTED_FIBRE |
6172 SUPPORTED_Pause |
6173 SUPPORTED_Asym_Pause);
6174 break;
6175
6176 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6177 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6178 ext_phy_type);
6179
6180 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6181 SUPPORTED_1000baseT_Full |
6182 SUPPORTED_FIBRE |
6183 SUPPORTED_Pause |
6184 SUPPORTED_Asym_Pause);
6185 break;
6186
6187 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6188 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
6189 ext_phy_type);
6190
6191 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6192 SUPPORTED_1000baseT_Full |
6193 SUPPORTED_Autoneg |
6194 SUPPORTED_FIBRE |
6195 SUPPORTED_Pause |
6196 SUPPORTED_Asym_Pause);
6197 break;
6198
6199 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6200 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
6201 ext_phy_type);
6202
6203 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6204 SUPPORTED_1000baseT_Full |
6205 SUPPORTED_Autoneg |
6206 SUPPORTED_FIBRE |
6207 SUPPORTED_Pause |
6208 SUPPORTED_Asym_Pause);
6209 break;
6210
6211 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6212 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6213 ext_phy_type);
6214
6215 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6216 SUPPORTED_TP |
6217 SUPPORTED_Autoneg |
6218 SUPPORTED_Pause |
6219 SUPPORTED_Asym_Pause);
6220 break;
6221
6222 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
6223 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
6224 ext_phy_type);
6225
6226 bp->port.supported |= (SUPPORTED_10baseT_Half |
6227 SUPPORTED_10baseT_Full |
6228 SUPPORTED_100baseT_Half |
6229 SUPPORTED_100baseT_Full |
6230 SUPPORTED_1000baseT_Full |
6231 SUPPORTED_10000baseT_Full |
6232 SUPPORTED_TP |
6233 SUPPORTED_Autoneg |
6234 SUPPORTED_Pause |
6235 SUPPORTED_Asym_Pause);
6236 break;
6237
6238 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6239 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6240 bp->link_params.ext_phy_config);
6241 break;
6242
6243 default:
6244 BNX2X_ERR("NVRAM config error. "
6245 "BAD XGXS ext_phy_config 0x%x\n",
6246 bp->link_params.ext_phy_config);
6247 return;
6248 }
6249
6250 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6251 port*0x18);
6252 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6253
6254 break;
6255
6256 default:
6257 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6258 bp->port.link_config);
6259 return;
6260 }
6261 bp->link_params.phy_addr = bp->port.phy_addr;
6262
6263 /* mask what we support according to speed_cap_mask */
6264 if (!(bp->link_params.speed_cap_mask &
6265 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6266 bp->port.supported &= ~SUPPORTED_10baseT_Half;
6267
6268 if (!(bp->link_params.speed_cap_mask &
6269 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6270 bp->port.supported &= ~SUPPORTED_10baseT_Full;
6271
6272 if (!(bp->link_params.speed_cap_mask &
6273 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6274 bp->port.supported &= ~SUPPORTED_100baseT_Half;
6275
6276 if (!(bp->link_params.speed_cap_mask &
6277 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6278 bp->port.supported &= ~SUPPORTED_100baseT_Full;
6279
6280 if (!(bp->link_params.speed_cap_mask &
6281 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6282 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6283 SUPPORTED_1000baseT_Full);
6284
6285 if (!(bp->link_params.speed_cap_mask &
6286 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6287 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
6288
6289 if (!(bp->link_params.speed_cap_mask &
6290 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6291 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
6292
6293 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
6294}
6295
6296static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6297{
6298 bp->link_params.req_duplex = DUPLEX_FULL;
6299
6300 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6301 case PORT_FEATURE_LINK_SPEED_AUTO:
6302 if (bp->port.supported & SUPPORTED_Autoneg) {
6303 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6304 bp->port.advertising = bp->port.supported;
6305 } else {
6306 u32 ext_phy_type =
6307 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6308
6309 if ((ext_phy_type ==
6310 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6311 (ext_phy_type ==
6312 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
6313 /* force 10G, no AN */
6314 bp->link_params.req_line_speed = SPEED_10000;
6315 bp->port.advertising =
6316 (ADVERTISED_10000baseT_Full |
6317 ADVERTISED_FIBRE);
6318 break;
6319 }
6320 BNX2X_ERR("NVRAM config error. "
6321 "Invalid link_config 0x%x"
6322 " Autoneg not supported\n",
6323 bp->port.link_config);
6324 return;
6325 }
6326 break;
6327
6328 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6329 if (bp->port.supported & SUPPORTED_10baseT_Full) {
6330 bp->link_params.req_line_speed = SPEED_10;
6331 bp->port.advertising = (ADVERTISED_10baseT_Full |
6332 ADVERTISED_TP);
6333 } else {
6334 BNX2X_ERROR("NVRAM config error. "
6335 "Invalid link_config 0x%x"
6336 " speed_cap_mask 0x%x\n",
6337 bp->port.link_config,
6338 bp->link_params.speed_cap_mask);
6339 return;
6340 }
6341 break;
6342
6343 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6344 if (bp->port.supported & SUPPORTED_10baseT_Half) {
6345 bp->link_params.req_line_speed = SPEED_10;
6346 bp->link_params.req_duplex = DUPLEX_HALF;
6347 bp->port.advertising = (ADVERTISED_10baseT_Half |
6348 ADVERTISED_TP);
6349 } else {
6350 BNX2X_ERROR("NVRAM config error. "
6351 "Invalid link_config 0x%x"
6352 " speed_cap_mask 0x%x\n",
6353 bp->port.link_config,
6354 bp->link_params.speed_cap_mask);
6355 return;
6356 }
6357 break;
6358
6359 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6360 if (bp->port.supported & SUPPORTED_100baseT_Full) {
6361 bp->link_params.req_line_speed = SPEED_100;
6362 bp->port.advertising = (ADVERTISED_100baseT_Full |
6363 ADVERTISED_TP);
6364 } else {
6365 BNX2X_ERROR("NVRAM config error. "
6366 "Invalid link_config 0x%x"
6367 " speed_cap_mask 0x%x\n",
6368 bp->port.link_config,
6369 bp->link_params.speed_cap_mask);
6370 return;
6371 }
6372 break;
6373
6374 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6375 if (bp->port.supported & SUPPORTED_100baseT_Half) {
6376 bp->link_params.req_line_speed = SPEED_100;
6377 bp->link_params.req_duplex = DUPLEX_HALF;
6378 bp->port.advertising = (ADVERTISED_100baseT_Half |
6379 ADVERTISED_TP);
6380 } else {
6381 BNX2X_ERROR("NVRAM config error. "
6382 "Invalid link_config 0x%x"
6383 " speed_cap_mask 0x%x\n",
6384 bp->port.link_config,
6385 bp->link_params.speed_cap_mask);
6386 return;
6387 }
6388 break;
6389
6390 case PORT_FEATURE_LINK_SPEED_1G:
6391 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
6392 bp->link_params.req_line_speed = SPEED_1000;
6393 bp->port.advertising = (ADVERTISED_1000baseT_Full |
6394 ADVERTISED_TP);
6395 } else {
6396 BNX2X_ERROR("NVRAM config error. "
6397 "Invalid link_config 0x%x"
6398 " speed_cap_mask 0x%x\n",
6399 bp->port.link_config,
6400 bp->link_params.speed_cap_mask);
6401 return;
6402 }
6403 break;
6404
6405 case PORT_FEATURE_LINK_SPEED_2_5G:
6406 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
6407 bp->link_params.req_line_speed = SPEED_2500;
6408 bp->port.advertising = (ADVERTISED_2500baseX_Full |
6409 ADVERTISED_TP);
6410 } else {
6411 BNX2X_ERROR("NVRAM config error. "
6412 "Invalid link_config 0x%x"
6413 " speed_cap_mask 0x%x\n",
6414 bp->port.link_config,
6415 bp->link_params.speed_cap_mask);
6416 return;
6417 }
6418 break;
6419
6420 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6421 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6422 case PORT_FEATURE_LINK_SPEED_10G_KR:
6423 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
6424 bp->link_params.req_line_speed = SPEED_10000;
6425 bp->port.advertising = (ADVERTISED_10000baseT_Full |
6426 ADVERTISED_FIBRE);
6427 } else {
6428 BNX2X_ERROR("NVRAM config error. "
6429 "Invalid link_config 0x%x"
6430 " speed_cap_mask 0x%x\n",
6431 bp->port.link_config,
6432 bp->link_params.speed_cap_mask);
6433 return;
6434 }
6435 break;
6436
6437 default:
6438 BNX2X_ERROR("NVRAM config error. "
6439 "BAD link speed link_config 0x%x\n",
6440 bp->port.link_config);
6441 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6442 bp->port.advertising = bp->port.supported;
6443 break;
6444 }
6445
6446 bp->link_params.req_flow_ctrl = (bp->port.link_config &
6447 PORT_FEATURE_FLOW_CONTROL_MASK);
6448 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
6449 !(bp->port.supported & SUPPORTED_Autoneg))
6450 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6451
6452 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
6453 " advertising 0x%x\n",
6454 bp->link_params.req_line_speed,
6455 bp->link_params.req_duplex,
6456 bp->link_params.req_flow_ctrl, bp->port.advertising);
6457}
6458
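/* The port MAC address lives in SHMEM as two words: mac_upper holds the two
 * most significant bytes and mac_lower the remaining four. The helper below
 * converts both to big endian so that the resulting buffer ends up in
 * network (wire) order.
 */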
6459static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6460{
6461 mac_hi = cpu_to_be16(mac_hi);
6462 mac_lo = cpu_to_be32(mac_lo);
6463 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6464 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6465}
6466
6467static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6468{
6469 int port = BP_PORT(bp);
6470 u32 val, val2;
6471 u32 config;
6472 u16 i;
6473 u32 ext_phy_type;
6474
6475 bp->link_params.bp = bp;
6476 bp->link_params.port = port;
6477
6478 bp->link_params.lane_config =
6479 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6480 bp->link_params.ext_phy_config =
6481 SHMEM_RD(bp,
6482 dev_info.port_hw_config[port].external_phy_config);
6483 /* BCM8727_NOC => BCM8727 no over current */
6484 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
6485 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
6486 bp->link_params.ext_phy_config &=
6487 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6488 bp->link_params.ext_phy_config |=
6489 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
6490 bp->link_params.feature_config_flags |=
6491 FEATURE_CONFIG_BCM8727_NOC;
6492 }
6493
6494 bp->link_params.speed_cap_mask =
6495 SHMEM_RD(bp,
6496 dev_info.port_hw_config[port].speed_capability_mask);
6497
6498 bp->port.link_config =
6499 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6500
6501 /* Get the 4 lanes xgxs config rx and tx */
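	/* Each SHMEM word packs two 16-bit lane values: the high half maps to
	 * the even lane index and the low half to the odd lane index.
	 */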
6502 for (i = 0; i < 2; i++) {
6503 val = SHMEM_RD(bp,
6504 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
6505 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
6506 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
6507
6508 val = SHMEM_RD(bp,
6509 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
6510 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
6511 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
6512 }
6513
6514 /* If the device is capable of WoL, set the default state according
6515 * to the HW
6516 */
6517 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
6518 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6519 (config & PORT_FEATURE_WOL_ENABLED));
6520
6521 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
6522 " speed_cap_mask 0x%08x link_config 0x%08x\n",
6523 bp->link_params.lane_config,
6524 bp->link_params.ext_phy_config,
6525 bp->link_params.speed_cap_mask, bp->port.link_config);
6526
6527 bp->link_params.switch_cfg |= (bp->port.link_config &
6528 PORT_FEATURE_CONNECTED_SWITCH_MASK);
6529 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6530
6531 bnx2x_link_settings_requested(bp);
6532
6533 /*
6534 * If connected directly, work with the internal PHY, otherwise, work
6535 * with the external PHY
6536 */
6537 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6538 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6539 bp->mdio.prtad = bp->link_params.phy_addr;
6540
6541 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6542 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6543 bp->mdio.prtad =
6544 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
6545
6546 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6547 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6548 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
6549 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6550 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6551
6552#ifdef BCM_CNIC
6553 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6554 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6555 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6556#endif
6557}
6558
6559static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6560{
6561 int func = BP_FUNC(bp);
6562 u32 val, val2;
6563 int rc = 0;
6564
6565 bnx2x_get_common_hwinfo(bp);
6566
6567 bp->e1hov = 0;
6568 bp->e1hmf = 0;
6569 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
6570 bp->mf_config =
6571 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
6572
6573 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
6574 FUNC_MF_CFG_E1HOV_TAG_MASK);
6575 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6576 bp->e1hmf = 1;
6577 BNX2X_DEV_INFO("%s function mode\n",
6578 IS_E1HMF(bp) ? "multi" : "single");
6579
6580 if (IS_E1HMF(bp)) {
6581 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6582 e1hov_tag) &
6583 FUNC_MF_CFG_E1HOV_TAG_MASK);
6584 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6585 bp->e1hov = val;
6586 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6587 "(0x%04x)\n",
6588 func, bp->e1hov, bp->e1hov);
6589 } else {
6590 BNX2X_ERROR("No valid E1HOV for func %d,"
6591 " aborting\n", func);
6592 rc = -EPERM;
6593 }
6594 } else {
6595 if (BP_E1HVN(bp)) {
6596 BNX2X_ERROR("VN %d in single function mode,"
6597 " aborting\n", BP_E1HVN(bp));
6598 rc = -EPERM;
6599 }
6600 }
6601 }
6602
6603 if (!BP_NOMCP(bp)) {
6604 bnx2x_get_port_hwinfo(bp);
6605
6606 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6607 DRV_MSG_SEQ_NUMBER_MASK);
6608 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6609 }
6610
6611 if (IS_E1HMF(bp)) {
6612 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6613 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
6614 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6615 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6616 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6617 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6618 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6619 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6620 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6621 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6622 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6623 ETH_ALEN);
6624 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6625 ETH_ALEN);
6626 }
6627
6628 return rc;
6629 }
6630
6631 if (BP_NOMCP(bp)) {
6632 /* only supposed to happen on emulation/FPGA */
6633 BNX2X_ERROR("warning: random MAC workaround active\n");
6634 random_ether_addr(bp->dev->dev_addr);
6635 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6636 }
6637
6638 return rc;
6639}
6640
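/* Read the PCI VPD, locate the read-only data section and, if the
 * manufacturer-ID keyword identifies a Dell board, copy the vendor-specific
 * (V0) keyword into bp->fw_ver.
 */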
6641static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6642{
6643 int cnt, i, block_end, rodi;
6644 char vpd_data[BNX2X_VPD_LEN+1];
6645 char str_id_reg[VENDOR_ID_LEN+1];
6646 char str_id_cap[VENDOR_ID_LEN+1];
6647 u8 len;
6648
6649 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6650 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6651
6652 if (cnt < BNX2X_VPD_LEN)
6653 goto out_not_found;
6654
6655 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6656 PCI_VPD_LRDT_RO_DATA);
6657 if (i < 0)
6658 goto out_not_found;
6659
6660
6661 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6662 pci_vpd_lrdt_size(&vpd_data[i]);
6663
6664 i += PCI_VPD_LRDT_TAG_SIZE;
6665
6666 if (block_end > BNX2X_VPD_LEN)
6667 goto out_not_found;
6668
6669 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6670 PCI_VPD_RO_KEYWORD_MFR_ID);
6671 if (rodi < 0)
6672 goto out_not_found;
6673
6674 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6675
6676 if (len != VENDOR_ID_LEN)
6677 goto out_not_found;
6678
6679 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6680
6681 /* vendor specific info */
6682 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6683 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6684 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6685 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6686
6687 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6688 PCI_VPD_RO_KEYWORD_VENDOR0);
6689 if (rodi >= 0) {
6690 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6691
6692 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6693
6694 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6695 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6696 bp->fw_ver[len] = ' ';
6697 }
6698 }
6699 return;
6700 }
6701out_not_found:
6702 return;
6703}
6704
6705static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6706{
6707 int func = BP_FUNC(bp);
6708 int timer_interval;
6709 int rc;
6710
6711 /* Disable interrupt handling until HW is initialized */
6712 atomic_set(&bp->intr_sem, 1);
6713 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6714
6715 mutex_init(&bp->port.phy_mutex);
6716 mutex_init(&bp->fw_mb_mutex);
6717 spin_lock_init(&bp->stats_lock);
6718#ifdef BCM_CNIC
6719 mutex_init(&bp->cnic_mutex);
6720#endif
6721
6722 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
6723 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
6724
6725 rc = bnx2x_get_hwinfo(bp);
6726
6727 bnx2x_read_fwinfo(bp);
6728 /* need to reset chip if undi was active */
6729 if (!BP_NOMCP(bp))
6730 bnx2x_undi_unload(bp);
6731
6732 if (CHIP_REV_IS_FPGA(bp))
6733 dev_err(&bp->pdev->dev, "FPGA detected\n");
6734
6735 if (BP_NOMCP(bp) && (func == 0))
6736 dev_err(&bp->pdev->dev, "MCP disabled, "
6737 "must load devices in order!\n");
6738
6739 /* Set multi queue mode */
6740 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6741 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
6742 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6743 "requested is not MSI-X\n");
6744 multi_mode = ETH_RSS_MODE_DISABLED;
6745 }
6746 bp->multi_mode = multi_mode;
6747 bp->int_mode = int_mode;
6748
6749 bp->dev->features |= NETIF_F_GRO;
6750
6751 /* Set TPA flags */
6752 if (disable_tpa) {
6753 bp->flags &= ~TPA_ENABLE_FLAG;
6754 bp->dev->features &= ~NETIF_F_LRO;
6755 } else {
6756 bp->flags |= TPA_ENABLE_FLAG;
6757 bp->dev->features |= NETIF_F_LRO;
6758 }
6759 bp->disable_tpa = disable_tpa;
6760
6761 if (CHIP_IS_E1(bp))
6762 bp->dropless_fc = 0;
6763 else
6764 bp->dropless_fc = dropless_fc;
6765
6766 bp->mrrs = mrrs;
6767
6768 bp->tx_ring_size = MAX_TX_AVAIL;
6769 bp->rx_ring_size = MAX_RX_AVAIL;
6770
6771 bp->rx_csum = 1;
6772
6773 /* make sure that the numbers are in the right granularity */
6774 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6775 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6776
6777 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6778 bp->current_interval = (poll ? poll : timer_interval);
6779
6780 init_timer(&bp->timer);
6781 bp->timer.expires = jiffies + bp->current_interval;
6782 bp->timer.data = (unsigned long) bp;
6783 bp->timer.function = bnx2x_timer;
6784
6785 return rc;
6786}
6787
6788
6789/****************************************************************************
6790* General service functions
6791****************************************************************************/
6792
6793/* called with rtnl_lock */
6794static int bnx2x_open(struct net_device *dev)
6795{
6796 struct bnx2x *bp = netdev_priv(dev);
6797
6798 netif_carrier_off(dev);
6799
6800 bnx2x_set_power_state(bp, PCI_D0);
6801
6802 if (!bnx2x_reset_is_done(bp)) {
6803 do {
 6804 			/* Reset the MCP mailbox sequence if there is an
 6805 			 * ongoing recovery
 6806 			 */
6807 bp->fw_seq = 0;
6808
 6809 			/* If it's the first function to load and reset done
 6810 			 * is still not cleared, a previous recovery may not have
 6811 			 * completed. We don't check the attention state here
 6812 			 * because it may have already been cleared by a "common"
 6813 			 * reset, but we shall proceed with "process kill" anyway.
 6814 			 */
6815 if ((bnx2x_get_load_cnt(bp) == 0) &&
6816 bnx2x_trylock_hw_lock(bp,
6817 HW_LOCK_RESOURCE_RESERVED_08) &&
6818 (!bnx2x_leader_reset(bp))) {
6819 DP(NETIF_MSG_HW, "Recovered in open\n");
6820 break;
6821 }
6822
6823 bnx2x_set_power_state(bp, PCI_D3hot);
6824
 6825 			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
 6826 			" completed yet. Try again later. If you still see this"
 6827 			" message after a few retries then a power cycle is"
 6828 			" required.\n", bp->dev->name);
6829
6830 return -EAGAIN;
6831 } while (0);
6832 }
6833
6834 bp->recovery_state = BNX2X_RECOVERY_DONE;
6835
6836 return bnx2x_nic_load(bp, LOAD_OPEN);
6837}
6838
6839/* called with rtnl_lock */
6840static int bnx2x_close(struct net_device *dev)
6841{
6842 struct bnx2x *bp = netdev_priv(dev);
6843
6844 /* Unload the driver, release IRQs */
6845 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
6846 bnx2x_set_power_state(bp, PCI_D3hot);
6847
6848 return 0;
6849}
6850
6851/* called with netif_tx_lock from dev_mcast.c */
6852void bnx2x_set_rx_mode(struct net_device *dev)
6853{
6854 struct bnx2x *bp = netdev_priv(dev);
6855 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6856 int port = BP_PORT(bp);
6857
6858 if (bp->state != BNX2X_STATE_OPEN) {
6859 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6860 return;
6861 }
6862
6863 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6864
6865 if (dev->flags & IFF_PROMISC)
6866 rx_mode = BNX2X_RX_MODE_PROMISC;
6867
6868 else if ((dev->flags & IFF_ALLMULTI) ||
6869 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6870 CHIP_IS_E1(bp)))
6871 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6872
6873 else { /* some multicasts */
6874 if (CHIP_IS_E1(bp)) {
6875 int i, old, offset;
6876 struct netdev_hw_addr *ha;
6877 struct mac_configuration_cmd *config =
6878 bnx2x_sp(bp, mcast_config);
6879
6880 i = 0;
6881 netdev_for_each_mc_addr(ha, dev) {
6882 config->config_table[i].
6883 cam_entry.msb_mac_addr =
6884 swab16(*(u16 *)&ha->addr[0]);
6885 config->config_table[i].
6886 cam_entry.middle_mac_addr =
6887 swab16(*(u16 *)&ha->addr[2]);
6888 config->config_table[i].
6889 cam_entry.lsb_mac_addr =
6890 swab16(*(u16 *)&ha->addr[4]);
6891 config->config_table[i].cam_entry.flags =
6892 cpu_to_le16(port);
6893 config->config_table[i].
6894 target_table_entry.flags = 0;
6895 config->config_table[i].target_table_entry.
6896 clients_bit_vector =
6897 cpu_to_le32(1 << BP_L_ID(bp));
6898 config->config_table[i].
6899 target_table_entry.vlan_id = 0;
6900
6901 DP(NETIF_MSG_IFUP,
6902 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6903 config->config_table[i].
6904 cam_entry.msb_mac_addr,
6905 config->config_table[i].
6906 cam_entry.middle_mac_addr,
6907 config->config_table[i].
6908 cam_entry.lsb_mac_addr);
6909 i++;
6910 }
6911 old = config->hdr.length;
6912 if (old > i) {
6913 for (; i < old; i++) {
6914 if (CAM_IS_INVALID(config->
6915 config_table[i])) {
6916 /* already invalidated */
6917 break;
6918 }
6919 /* invalidate */
6920 CAM_INVALIDATE(config->
6921 config_table[i]);
6922 }
6923 }
6924
6925 if (CHIP_REV_IS_SLOW(bp))
6926 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6927 else
6928 offset = BNX2X_MAX_MULTICAST*(1 + port);
6929
6930 config->hdr.length = i;
6931 config->hdr.offset = offset;
6932 config->hdr.client_id = bp->fp->cl_id;
6933 config->hdr.reserved1 = 0;
6934
6935 bp->set_mac_pending++;
6936 smp_wmb();
6937
6938 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6939 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6940 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6941 0);
6942 } else { /* E1H */
6943 /* Accept one or more multicasts */
6944 struct netdev_hw_addr *ha;
6945 u32 mc_filter[MC_HASH_SIZE];
6946 u32 crc, bit, regidx;
6947 int i;
6948
6949 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6950
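			/* Approximate multicast filtering: hash each MAC with
			 * CRC32c, use the top 8 bits of the CRC to pick one of
			 * 256 bins, and set the corresponding bit in the
			 * MC_HASH_SIZE 32-bit filter registers written below.
			 */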
6951 netdev_for_each_mc_addr(ha, dev) {
6952 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6953 ha->addr);
6954
6955 crc = crc32c_le(0, ha->addr, ETH_ALEN);
6956 bit = (crc >> 24) & 0xff;
6957 regidx = bit >> 5;
6958 bit &= 0x1f;
6959 mc_filter[regidx] |= (1 << bit);
6960 }
6961
6962 for (i = 0; i < MC_HASH_SIZE; i++)
6963 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6964 mc_filter[i]);
6965 }
6966 }
6967
6968 bp->rx_mode = rx_mode;
6969 bnx2x_set_storm_rx_mode(bp);
6970}
6971
6972
6973/* called with rtnl_lock */
6974static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6975 int devad, u16 addr)
6976{
6977 struct bnx2x *bp = netdev_priv(netdev);
6978 u16 value;
6979 int rc;
6980 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6981
6982 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6983 prtad, devad, addr);
6984
6985 if (prtad != bp->mdio.prtad) {
 6986 		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
6987 prtad, bp->mdio.prtad);
6988 return -EINVAL;
6989 }
6990
6991 /* The HW expects different devad if CL22 is used */
6992 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6993
6994 bnx2x_acquire_phy_lock(bp);
6995 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
6996 devad, addr, &value);
6997 bnx2x_release_phy_lock(bp);
6998 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
6999
7000 if (!rc)
7001 rc = value;
7002 return rc;
7003}
7004
7005/* called with rtnl_lock */
7006static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
7007 u16 addr, u16 value)
7008{
7009 struct bnx2x *bp = netdev_priv(netdev);
7010 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7011 int rc;
7012
7013 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7014 " value 0x%x\n", prtad, devad, addr, value);
7015
7016 if (prtad != bp->mdio.prtad) {
 7017 		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
7018 prtad, bp->mdio.prtad);
7019 return -EINVAL;
7020 }
7021
7022 /* The HW expects different devad if CL22 is used */
7023 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7024
7025 bnx2x_acquire_phy_lock(bp);
7026 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
7027 devad, addr, value);
7028 bnx2x_release_phy_lock(bp);
7029 return rc;
7030}
7031
7032/* called with rtnl_lock */
7033static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7034{
7035 struct bnx2x *bp = netdev_priv(dev);
7036 struct mii_ioctl_data *mdio = if_mii(ifr);
7037
7038 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
7039 mdio->phy_id, mdio->reg_num, mdio->val_in);
7040
7041 if (!netif_running(dev))
7042 return -EAGAIN;
7043
7044 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
7045}
7046
7047#ifdef CONFIG_NET_POLL_CONTROLLER
7048static void poll_bnx2x(struct net_device *dev)
7049{
7050 struct bnx2x *bp = netdev_priv(dev);
7051
7052 disable_irq(bp->pdev->irq);
7053 bnx2x_interrupt(bp->pdev->irq, dev);
7054 enable_irq(bp->pdev->irq);
7055}
7056#endif
7057
7058static const struct net_device_ops bnx2x_netdev_ops = {
7059 .ndo_open = bnx2x_open,
7060 .ndo_stop = bnx2x_close,
7061 .ndo_start_xmit = bnx2x_start_xmit,
7062 .ndo_set_multicast_list = bnx2x_set_rx_mode,
7063 .ndo_set_mac_address = bnx2x_change_mac_addr,
7064 .ndo_validate_addr = eth_validate_addr,
7065 .ndo_do_ioctl = bnx2x_ioctl,
7066 .ndo_change_mtu = bnx2x_change_mtu,
7067 .ndo_tx_timeout = bnx2x_tx_timeout,
7068#ifdef BCM_VLAN
7069 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
7070#endif
7071#ifdef CONFIG_NET_POLL_CONTROLLER
7072 .ndo_poll_controller = poll_bnx2x,
7073#endif
7074};
7075
7076static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7077 struct net_device *dev)
7078{
7079 struct bnx2x *bp;
7080 int rc;
7081
7082 SET_NETDEV_DEV(dev, &pdev->dev);
7083 bp = netdev_priv(dev);
7084
7085 bp->dev = dev;
7086 bp->pdev = pdev;
7087 bp->flags = 0;
7088 bp->func = PCI_FUNC(pdev->devfn);
7089
7090 rc = pci_enable_device(pdev);
7091 if (rc) {
7092 dev_err(&bp->pdev->dev,
7093 "Cannot enable PCI device, aborting\n");
7094 goto err_out;
7095 }
7096
7097 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7098 dev_err(&bp->pdev->dev,
7099 "Cannot find PCI device base address, aborting\n");
7100 rc = -ENODEV;
7101 goto err_out_disable;
7102 }
7103
7104 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
7105 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
7106 " base address, aborting\n");
7107 rc = -ENODEV;
7108 goto err_out_disable;
7109 }
7110
7111 if (atomic_read(&pdev->enable_cnt) == 1) {
7112 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7113 if (rc) {
7114 dev_err(&bp->pdev->dev,
7115 "Cannot obtain PCI resources, aborting\n");
7116 goto err_out_disable;
7117 }
7118
7119 pci_set_master(pdev);
7120 pci_save_state(pdev);
7121 }
7122
7123 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7124 if (bp->pm_cap == 0) {
7125 dev_err(&bp->pdev->dev,
7126 "Cannot find power management capability, aborting\n");
7127 rc = -EIO;
7128 goto err_out_release;
7129 }
7130
7131 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
7132 if (bp->pcie_cap == 0) {
7133 dev_err(&bp->pdev->dev,
7134 "Cannot find PCI Express capability, aborting\n");
7135 rc = -EIO;
7136 goto err_out_release;
7137 }
7138
7139 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
7140 bp->flags |= USING_DAC_FLAG;
7141 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
7142 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
7143 " failed, aborting\n");
7144 rc = -EIO;
7145 goto err_out_release;
7146 }
7147
7148 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7149 dev_err(&bp->pdev->dev,
7150 "System does not support DMA, aborting\n");
7151 rc = -EIO;
7152 goto err_out_release;
7153 }
7154
7155 dev->mem_start = pci_resource_start(pdev, 0);
7156 dev->base_addr = dev->mem_start;
7157 dev->mem_end = pci_resource_end(pdev, 0);
7158
7159 dev->irq = pdev->irq;
7160
7161 bp->regview = pci_ioremap_bar(pdev, 0);
7162 if (!bp->regview) {
7163 dev_err(&bp->pdev->dev,
7164 "Cannot map register space, aborting\n");
7165 rc = -ENOMEM;
7166 goto err_out_release;
7167 }
7168
7169 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7170 min_t(u64, BNX2X_DB_SIZE,
7171 pci_resource_len(pdev, 2)));
7172 if (!bp->doorbells) {
7173 dev_err(&bp->pdev->dev,
7174 "Cannot map doorbell space, aborting\n");
7175 rc = -ENOMEM;
7176 goto err_out_unmap;
7177 }
7178
7179 bnx2x_set_power_state(bp, PCI_D0);
7180
7181 /* clean indirect addresses */
7182 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
7183 PCICFG_VENDOR_ID_OFFSET);
7184 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
7185 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
7186 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
7187 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
7188
7189 /* Reset the load counter */
7190 bnx2x_clear_load_cnt(bp);
7191
7192 dev->watchdog_timeo = TX_TIMEOUT;
7193
7194 dev->netdev_ops = &bnx2x_netdev_ops;
7195 bnx2x_set_ethtool_ops(dev);
7196 dev->features |= NETIF_F_SG;
7197 dev->features |= NETIF_F_HW_CSUM;
7198 if (bp->flags & USING_DAC_FLAG)
7199 dev->features |= NETIF_F_HIGHDMA;
7200 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7201 dev->features |= NETIF_F_TSO6;
7202#ifdef BCM_VLAN
7203 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
7204 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
7205
7206 dev->vlan_features |= NETIF_F_SG;
7207 dev->vlan_features |= NETIF_F_HW_CSUM;
7208 if (bp->flags & USING_DAC_FLAG)
7209 dev->vlan_features |= NETIF_F_HIGHDMA;
7210 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7211 dev->vlan_features |= NETIF_F_TSO6;
7212#endif
7213
7214 /* get_port_hwinfo() will set prtad and mmds properly */
7215 bp->mdio.prtad = MDIO_PRTAD_NONE;
7216 bp->mdio.mmds = 0;
7217 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7218 bp->mdio.dev = dev;
7219 bp->mdio.mdio_read = bnx2x_mdio_read;
7220 bp->mdio.mdio_write = bnx2x_mdio_write;
7221
7222 return 0;
7223
7224err_out_unmap:
7225 if (bp->regview) {
7226 iounmap(bp->regview);
7227 bp->regview = NULL;
7228 }
7229 if (bp->doorbells) {
7230 iounmap(bp->doorbells);
7231 bp->doorbells = NULL;
7232 }
7233
7234err_out_release:
7235 if (atomic_read(&pdev->enable_cnt) == 1)
7236 pci_release_regions(pdev);
7237
7238err_out_disable:
7239 pci_disable_device(pdev);
7240 pci_set_drvdata(pdev, NULL);
7241
7242err_out:
7243 return rc;
7244}
7245
7246static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7247 int *width, int *speed)
7248{
7249 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7250
7251 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7252
 7253 	/* returned speed value: 1 = 2.5GHz, 2 = 5GHz */
7254 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
7255}
7256
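/* The firmware file begins with a bnx2x_fw_file_hdr, effectively an array of
 * {offset, len} section descriptors. The checks below verify that every
 * section fits inside the blob, that every init_ops offset indexes a valid
 * operation, and that the embedded FW version matches the version this
 * driver was built against.
 */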
7257static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
7258{
7259 const struct firmware *firmware = bp->firmware;
7260 struct bnx2x_fw_file_hdr *fw_hdr;
7261 struct bnx2x_fw_file_section *sections;
7262 u32 offset, len, num_ops;
7263 u16 *ops_offsets;
7264 int i;
7265 const u8 *fw_ver;
7266
7267 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7268 return -EINVAL;
7269
7270 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7271 sections = (struct bnx2x_fw_file_section *)fw_hdr;
7272
7273 /* Make sure none of the offsets and sizes make us read beyond
7274 * the end of the firmware data */
7275 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7276 offset = be32_to_cpu(sections[i].offset);
7277 len = be32_to_cpu(sections[i].len);
7278 if (offset + len > firmware->size) {
7279 dev_err(&bp->pdev->dev,
7280 "Section %d length is out of bounds\n", i);
7281 return -EINVAL;
7282 }
7283 }
7284
7285 /* Likewise for the init_ops offsets */
7286 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7287 ops_offsets = (u16 *)(firmware->data + offset);
7288 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7289
7290 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7291 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
7292 dev_err(&bp->pdev->dev,
7293 "Section offset %d is out of bounds\n", i);
7294 return -EINVAL;
7295 }
7296 }
7297
7298 /* Check FW version */
7299 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7300 fw_ver = firmware->data + offset;
7301 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7302 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7303 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7304 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
7305 dev_err(&bp->pdev->dev,
7306 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
7307 fw_ver[0], fw_ver[1], fw_ver[2],
7308 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7309 BCM_5710_FW_MINOR_VERSION,
7310 BCM_5710_FW_REVISION_VERSION,
7311 BCM_5710_FW_ENGINEERING_VERSION);
7312 return -EINVAL;
7313 }
7314
7315 return 0;
7316}
7317
7318static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7319{
7320 const __be32 *source = (const __be32 *)_source;
7321 u32 *target = (u32 *)_target;
7322 u32 i;
7323
7324 for (i = 0; i < n/4; i++)
7325 target[i] = be32_to_cpu(source[i]);
7326}
7327
 7328/*
 7329 * Ops array is stored in the following format:
 7330 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 7331 */
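/* For example, a hypothetical raw pair of 0x01000040 and 0x0000000a would be
 * unpacked by bnx2x_prep_ops() below into op = 0x01, offset = 0x000040 and
 * raw_data = 0x0000000a.
 */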
7332static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7333{
7334 const __be32 *source = (const __be32 *)_source;
7335 struct raw_op *target = (struct raw_op *)_target;
7336 u32 i, j, tmp;
7337
7338 for (i = 0, j = 0; i < n/8; i++, j += 2) {
7339 tmp = be32_to_cpu(source[j]);
7340 target[i].op = (tmp >> 24) & 0xff;
7341 target[i].offset = tmp & 0xffffff;
7342 target[i].raw_data = be32_to_cpu(source[j + 1]);
7343 }
7344}
7345
7346static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7347{
7348 const __be16 *source = (const __be16 *)_source;
7349 u16 *target = (u16 *)_target;
7350 u32 i;
7351
7352 for (i = 0; i < n/2; i++)
7353 target[i] = be16_to_cpu(source[i]);
7354}
7355
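/* Allocate bp->arr using the length recorded for that firmware section in
 * the file header and convert the section contents from big endian with the
 * given helper; on allocation failure, jump to the supplied error label.
 */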
7356#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
7357do { \
7358 u32 len = be32_to_cpu(fw_hdr->arr.len); \
7359 bp->arr = kmalloc(len, GFP_KERNEL); \
7360 if (!bp->arr) { \
7361 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7362 goto lbl; \
7363 } \
7364 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
7365 (u8 *)bp->arr, len); \
7366} while (0)
7367
7368static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
7369{
7370 const char *fw_file_name;
7371 struct bnx2x_fw_file_hdr *fw_hdr;
7372 int rc;
7373
7374 if (CHIP_IS_E1(bp))
7375 fw_file_name = FW_FILE_NAME_E1;
7376 else if (CHIP_IS_E1H(bp))
7377 fw_file_name = FW_FILE_NAME_E1H;
7378 else {
7379 dev_err(dev, "Unsupported chip revision\n");
7380 return -EINVAL;
7381 }
7382
7383 dev_info(dev, "Loading %s\n", fw_file_name);
7384
7385 rc = request_firmware(&bp->firmware, fw_file_name, dev);
7386 if (rc) {
7387 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
7388 goto request_firmware_exit;
7389 }
7390
7391 rc = bnx2x_check_firmware(bp);
7392 if (rc) {
7393 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
7394 goto request_firmware_exit;
7395 }
7396
7397 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7398
7399 /* Initialize the pointers to the init arrays */
7400 /* Blob */
7401 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7402
7403 /* Opcodes */
7404 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7405
7406 /* Offsets */
7407 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7408 be16_to_cpu_n);
7409
7410 /* STORMs firmware */
7411 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7412 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
7413 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
7414 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
7415 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7416 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
7417 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
7418 be32_to_cpu(fw_hdr->usem_pram_data.offset);
7419 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7420 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
7421 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
7422 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
7423 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7424 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7425 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
7426 be32_to_cpu(fw_hdr->csem_pram_data.offset);
7427
7428 return 0;
7429
7430init_offsets_alloc_err:
7431 kfree(bp->init_ops);
7432init_ops_alloc_err:
7433 kfree(bp->init_data);
7434request_firmware_exit:
7435 release_firmware(bp->firmware);
7436
7437 return rc;
7438}
7439
7440
7441static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7442 const struct pci_device_id *ent)
7443{
7444 struct net_device *dev = NULL;
7445 struct bnx2x *bp;
7446 int pcie_width, pcie_speed;
7447 int rc;
7448
7449 /* dev zeroed in init_etherdev */
7450 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
7451 if (!dev) {
7452 dev_err(&pdev->dev, "Cannot allocate net device\n");
7453 return -ENOMEM;
7454 }
7455
7456 bp = netdev_priv(dev);
7457 bp->msg_enable = debug;
7458
7459 pci_set_drvdata(pdev, dev);
7460
7461 rc = bnx2x_init_dev(pdev, dev);
7462 if (rc < 0) {
7463 free_netdev(dev);
7464 return rc;
7465 }
7466
7467 rc = bnx2x_init_bp(bp);
7468 if (rc)
7469 goto init_one_exit;
7470
7471 /* Set init arrays */
7472 rc = bnx2x_init_firmware(bp, &pdev->dev);
7473 if (rc) {
7474 dev_err(&pdev->dev, "Error loading firmware\n");
7475 goto init_one_exit;
7476 }
7477
7478 rc = register_netdev(dev);
7479 if (rc) {
7480 dev_err(&pdev->dev, "Cannot register net device\n");
7481 goto init_one_exit;
7482 }
7483
7484 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
7485 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7486 " IRQ %d, ", board_info[ent->driver_data].name,
7487 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7488 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
7489 dev->base_addr, bp->pdev->irq);
7490 pr_cont("node addr %pM\n", dev->dev_addr);
7491
7492 return 0;
7493
7494init_one_exit:
7495 if (bp->regview)
7496 iounmap(bp->regview);
7497
7498 if (bp->doorbells)
7499 iounmap(bp->doorbells);
7500
7501 free_netdev(dev);
7502
7503 if (atomic_read(&pdev->enable_cnt) == 1)
7504 pci_release_regions(pdev);
7505
7506 pci_disable_device(pdev);
7507 pci_set_drvdata(pdev, NULL);
7508
7509 return rc;
7510}
7511
7512static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7513{
7514 struct net_device *dev = pci_get_drvdata(pdev);
7515 struct bnx2x *bp;
7516
7517 if (!dev) {
7518 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
7519 return;
7520 }
7521 bp = netdev_priv(dev);
7522
7523 unregister_netdev(dev);
7524
7525 /* Make sure RESET task is not scheduled before continuing */
7526 cancel_delayed_work_sync(&bp->reset_task);
7527
7528 kfree(bp->init_ops_offsets);
7529 kfree(bp->init_ops);
7530 kfree(bp->init_data);
7531 release_firmware(bp->firmware);
7532
7533 if (bp->regview)
7534 iounmap(bp->regview);
7535
7536 if (bp->doorbells)
7537 iounmap(bp->doorbells);
7538
7539 free_netdev(dev);
7540
7541 if (atomic_read(&pdev->enable_cnt) == 1)
7542 pci_release_regions(pdev);
7543
7544 pci_disable_device(pdev);
7545 pci_set_drvdata(pdev, NULL);
7546}
7547
7548static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7549{
7550 int i;
7551
7552 bp->state = BNX2X_STATE_ERROR;
7553
7554 bp->rx_mode = BNX2X_RX_MODE_NONE;
7555
7556 bnx2x_netif_stop(bp, 0);
7557 netif_carrier_off(bp->dev);
7558
7559 del_timer_sync(&bp->timer);
7560 bp->stats_state = STATS_STATE_DISABLED;
7561 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7562
7563 /* Release IRQs */
7564 bnx2x_free_irq(bp, false);
7565
7566 if (CHIP_IS_E1(bp)) {
7567 struct mac_configuration_cmd *config =
7568 bnx2x_sp(bp, mcast_config);
7569
7570 for (i = 0; i < config->hdr.length; i++)
7571 CAM_INVALIDATE(config->config_table[i]);
7572 }
7573
7574 /* Free SKBs, SGEs, TPA pool and driver internals */
7575 bnx2x_free_skbs(bp);
7576 for_each_queue(bp, i)
7577 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7578 for_each_queue(bp, i)
7579 netif_napi_del(&bnx2x_fp(bp, i, napi));
7580 bnx2x_free_mem(bp);
7581
7582 bp->state = BNX2X_STATE_CLOSED;
7583
7584 return 0;
7585}
7586
7587static void bnx2x_eeh_recover(struct bnx2x *bp)
7588{
7589 u32 val;
7590
7591 mutex_init(&bp->port.phy_mutex);
7592
7593 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7594 bp->link_params.shmem_base = bp->common.shmem_base;
7595 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7596
7597 if (!bp->common.shmem_base ||
7598 (bp->common.shmem_base < 0xA0000) ||
7599 (bp->common.shmem_base >= 0xC0000)) {
7600 BNX2X_DEV_INFO("MCP not active\n");
7601 bp->flags |= NO_MCP_FLAG;
7602 return;
7603 }
7604
7605 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7606 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7607 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7608 BNX2X_ERR("BAD MCP validity signature\n");
7609
7610 if (!BP_NOMCP(bp)) {
7611 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
7612 & DRV_MSG_SEQ_NUMBER_MASK);
7613 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7614 }
7615}
7616
7617/**
7618 * bnx2x_io_error_detected - called when PCI error is detected
7619 * @pdev: Pointer to PCI device
7620 * @state: The current pci connection state
7621 *
7622 * This function is called after a PCI bus error affecting
7623 * this device has been detected.
7624 */
7625static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
7626 pci_channel_state_t state)
7627{
7628 struct net_device *dev = pci_get_drvdata(pdev);
7629 struct bnx2x *bp = netdev_priv(dev);
7630
7631 rtnl_lock();
7632
7633 netif_device_detach(dev);
7634
7635 if (state == pci_channel_io_perm_failure) {
7636 rtnl_unlock();
7637 return PCI_ERS_RESULT_DISCONNECT;
7638 }
7639
7640 if (netif_running(dev))
7641 bnx2x_eeh_nic_unload(bp);
7642
7643 pci_disable_device(pdev);
7644
7645 rtnl_unlock();
7646
7647 /* Request a slot reset */
7648 return PCI_ERS_RESULT_NEED_RESET;
7649}
7650
7651/**
7652 * bnx2x_io_slot_reset - called after the PCI bus has been reset
7653 * @pdev: Pointer to PCI device
7654 *
7655 * Restart the card from scratch, as if from a cold-boot.
7656 */
7657static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
7658{
7659 struct net_device *dev = pci_get_drvdata(pdev);
7660 struct bnx2x *bp = netdev_priv(dev);
7661
7662 rtnl_lock();
7663
7664 if (pci_enable_device(pdev)) {
7665 dev_err(&pdev->dev,
7666 "Cannot re-enable PCI device after reset\n");
7667 rtnl_unlock();
7668 return PCI_ERS_RESULT_DISCONNECT;
7669 }
7670
7671 pci_set_master(pdev);
7672 pci_restore_state(pdev);
7673
7674 if (netif_running(dev))
7675 bnx2x_set_power_state(bp, PCI_D0);
7676
7677 rtnl_unlock();
7678
7679 return PCI_ERS_RESULT_RECOVERED;
7680}
7681
7682/**
7683 * bnx2x_io_resume - called when traffic can start flowing again
7684 * @pdev: Pointer to PCI device
7685 *
7686 * This callback is called when the error recovery driver tells us that
7688 * it's OK to resume normal operation.
7688 */
7689static void bnx2x_io_resume(struct pci_dev *pdev)
7690{
7691 struct net_device *dev = pci_get_drvdata(pdev);
7692 struct bnx2x *bp = netdev_priv(dev);
7693
7694 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7695 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
7696 return;
7697 }
7698
7699 rtnl_lock();
7700
7701 bnx2x_eeh_recover(bp);
7702
7703 if (netif_running(dev))
7704 bnx2x_nic_load(bp, LOAD_NORMAL);
7705
7706 netif_device_attach(dev);
7707
7708 rtnl_unlock();
7709}
7710
7711static struct pci_error_handlers bnx2x_err_handler = {
7712 .error_detected = bnx2x_io_error_detected,
7713 .slot_reset = bnx2x_io_slot_reset,
7714 .resume = bnx2x_io_resume,
7715};
7716
7717static struct pci_driver bnx2x_pci_driver = {
7718 .name = DRV_MODULE_NAME,
7719 .id_table = bnx2x_pci_tbl,
7720 .probe = bnx2x_init_one,
7721 .remove = __devexit_p(bnx2x_remove_one),
7722 .suspend = bnx2x_suspend,
7723 .resume = bnx2x_resume,
7724 .err_handler = &bnx2x_err_handler,
7725};
7726
7727static int __init bnx2x_init(void)
7728{
7729 int ret;
7730
7731 pr_info("%s", version);
7732
7733 bnx2x_wq = create_singlethread_workqueue("bnx2x");
7734 if (bnx2x_wq == NULL) {
7735 pr_err("Cannot create workqueue\n");
7736 return -ENOMEM;
7737 }
7738
7739 ret = pci_register_driver(&bnx2x_pci_driver);
7740 if (ret) {
7741 pr_err("Cannot register driver\n");
7742 destroy_workqueue(bnx2x_wq);
7743 }
7744 return ret;
7745}
7746
7747static void __exit bnx2x_cleanup(void)
7748{
7749 pci_unregister_driver(&bnx2x_pci_driver);
7750
7751 destroy_workqueue(bnx2x_wq);
7752}
7753
7754module_init(bnx2x_init);
7755module_exit(bnx2x_cleanup);
7756
7757#ifdef BCM_CNIC
7758
7759/* count denotes the number of new completions we have seen */
7760static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
7761{
7762 struct eth_spe *spe;
7763
7764#ifdef BNX2X_STOP_ON_ERROR
7765 if (unlikely(bp->panic))
7766 return;
7767#endif
7768
7769 spin_lock_bh(&bp->spq_lock);
7770 bp->cnic_spq_pending -= count;
7771
7772 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
7773 bp->cnic_spq_pending++) {
7774
7775 if (!bp->cnic_kwq_pending)
7776 break;
7777
7778 spe = bnx2x_sp_get_next(bp);
7779 *spe = *bp->cnic_kwq_cons;
7780
7781 bp->cnic_kwq_pending--;
7782
7783 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
7784 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
7785
7786 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
7787 bp->cnic_kwq_cons = bp->cnic_kwq;
7788 else
7789 bp->cnic_kwq_cons++;
7790 }
7791 bnx2x_sp_prod_update(bp);
7792 spin_unlock_bh(&bp->spq_lock);
7793}
7794
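/*
 * bnx2x_cnic_sp_queue - queue KWQEs submitted by the CNIC driver.
 * Copies up to @count 16-byte KWQEs onto the driver's CNIC kwq ring under
 * spq_lock and, if the slow path queue has room, posts them immediately
 * via bnx2x_cnic_sp_post().  Returns the number of KWQEs accepted.
 */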
7795static int bnx2x_cnic_sp_queue(struct net_device *dev,
7796 struct kwqe_16 *kwqes[], u32 count)
7797{
7798 struct bnx2x *bp = netdev_priv(dev);
7799 int i;
7800
7801#ifdef BNX2X_STOP_ON_ERROR
7802 if (unlikely(bp->panic))
7803 return -EIO;
7804#endif
7805
7806 spin_lock_bh(&bp->spq_lock);
7807
7808 for (i = 0; i < count; i++) {
7809 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
7810
7811 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
7812 break;
7813
7814 *bp->cnic_kwq_prod = *spe;
7815
7816 bp->cnic_kwq_pending++;
7817
7818 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7819 spe->hdr.conn_and_cmd_data, spe->hdr.type,
7820 spe->data.mac_config_addr.hi,
7821 spe->data.mac_config_addr.lo,
7822 bp->cnic_kwq_pending);
7823
7824 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
7825 bp->cnic_kwq_prod = bp->cnic_kwq;
7826 else
7827 bp->cnic_kwq_prod++;
7828 }
7829
7830 spin_unlock_bh(&bp->spq_lock);
7831
7832 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
7833 bnx2x_cnic_sp_post(bp, 0);
7834
7835 return i;
7836}
7837
7838static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7839{
7840 struct cnic_ops *c_ops;
7841 int rc = 0;
7842
7843 mutex_lock(&bp->cnic_mutex);
7844 c_ops = bp->cnic_ops;
7845 if (c_ops)
7846 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7847 mutex_unlock(&bp->cnic_mutex);
7848
7849 return rc;
7850}
7851
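/*
 * Same as bnx2x_cnic_ctl_send() but callable from BH context: the
 * cnic_ops pointer is sampled under rcu_read_lock() instead of taking
 * cnic_mutex.
 */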
7852static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7853{
7854 struct cnic_ops *c_ops;
7855 int rc = 0;
7856
7857 rcu_read_lock();
7858 c_ops = rcu_dereference(bp->cnic_ops);
7859 if (c_ops)
7860 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7861 rcu_read_unlock();
7862
7863 return rc;
7864}
7865
7866/*
7867 * for commands that have no data
7868 */
7869int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
7870{
7871 struct cnic_ctl_info ctl = {0};
7872
7873 ctl.cmd = cmd;
7874
7875 return bnx2x_cnic_ctl_send(bp, &ctl);
7876}
7877
7878static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7879{
7880 struct cnic_ctl_info ctl;
7881
7882 /* first we tell CNIC and only then we count this as a completion */
7883 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
7884 ctl.data.comp.cid = cid;
7885
7886 bnx2x_cnic_ctl_send_bh(bp, &ctl);
7887 bnx2x_cnic_sp_post(bp, 1);
7888}
7889
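/*
 * bnx2x_drv_ctl - control entry point exposed to the CNIC driver.
 * Dispatches context-table writes, slow path completion accounting and
 * L2 client start/stop (RX mode mask update) requests.
 */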
7890static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7891{
7892 struct bnx2x *bp = netdev_priv(dev);
7893 int rc = 0;
7894
7895 switch (ctl->cmd) {
7896 case DRV_CTL_CTXTBL_WR_CMD: {
7897 u32 index = ctl->data.io.offset;
7898 dma_addr_t addr = ctl->data.io.dma_addr;
7899
7900 bnx2x_ilt_wr(bp, index, addr);
7901 break;
7902 }
7903
7904 case DRV_CTL_COMPLETION_CMD: {
7905 int count = ctl->data.comp.comp_count;
7906
7907 bnx2x_cnic_sp_post(bp, count);
7908 break;
7909 }
7910
7911 /* rtnl_lock is held. */
7912 case DRV_CTL_START_L2_CMD: {
7913 u32 cli = ctl->data.ring.client_id;
7914
7915 bp->rx_mode_cl_mask |= (1 << cli);
7916 bnx2x_set_storm_rx_mode(bp);
7917 break;
7918 }
7919
7920 /* rtnl_lock is held. */
7921 case DRV_CTL_STOP_L2_CMD: {
7922 u32 cli = ctl->data.ring.client_id;
7923
7924 bp->rx_mode_cl_mask &= ~(1 << cli);
7925 bnx2x_set_storm_rx_mode(bp);
7926 break;
7927 }
7928
7929 default:
7930 BNX2X_ERR("unknown command %x\n", ctl->cmd);
7931 rc = -EINVAL;
7932 }
7933
7934 return rc;
7935}
7936
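/*
 * bnx2x_setup_cnic_irq_info - describe the interrupt resources handed to
 * the CNIC driver: the CNIC status block (and its MSI-X vector when MSI-X
 * is in use) plus the default status block.
 */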
7937void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
7938{
7939 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7940
7941 if (bp->flags & USING_MSIX_FLAG) {
7942 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
7943 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
7944 cp->irq_arr[0].vector = bp->msix_table[1].vector;
7945 } else {
7946 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7947 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7948 }
7949 cp->irq_arr[0].status_blk = bp->cnic_sb;
7950 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
7951 cp->irq_arr[1].status_blk = bp->def_status_blk;
7952 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
7953
7954 cp->num_irq = 2;
7955}
7956
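/*
 * bnx2x_register_cnic - called by the CNIC driver through
 * cnic_eth_dev->drv_register_cnic.  Allocates the KWQE ring, initializes
 * the CNIC status block and IRQ info, programs the iSCSI MAC and finally
 * publishes the cnic_ops pointer with rcu_assign_pointer().
 */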
7957static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7958 void *data)
7959{
7960 struct bnx2x *bp = netdev_priv(dev);
7961 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7962
7963 if (ops == NULL)
7964 return -EINVAL;
7965
7966 if (atomic_read(&bp->intr_sem) != 0)
7967 return -EBUSY;
7968
7969 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
7970 if (!bp->cnic_kwq)
7971 return -ENOMEM;
7972
7973 bp->cnic_kwq_cons = bp->cnic_kwq;
7974 bp->cnic_kwq_prod = bp->cnic_kwq;
7975 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
7976
7977 bp->cnic_spq_pending = 0;
7978 bp->cnic_kwq_pending = 0;
7979
7980 bp->cnic_data = data;
7981
7982 cp->num_irq = 0;
7983 cp->drv_state = CNIC_DRV_STATE_REGD;
7984
7985 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
7986
7987 bnx2x_setup_cnic_irq_info(bp);
7988 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7989 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7990 rcu_assign_pointer(bp->cnic_ops, ops);
7991
7992 return 0;
7993}
7994
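/*
 * bnx2x_unregister_cnic - undo bnx2x_register_cnic(): clear the iSCSI MAC
 * if it was set, drop the cnic_ops pointer, wait for RCU readers and free
 * the KWQE ring.
 */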
7995static int bnx2x_unregister_cnic(struct net_device *dev)
7996{
7997 struct bnx2x *bp = netdev_priv(dev);
7998 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7999
8000 mutex_lock(&bp->cnic_mutex);
8001 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8002 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8003 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8004 }
8005 cp->drv_state = 0;
8006 rcu_assign_pointer(bp->cnic_ops, NULL);
8007 mutex_unlock(&bp->cnic_mutex);
8008 synchronize_rcu();
8009 kfree(bp->cnic_kwq);
8010 bp->cnic_kwq = NULL;
8011
8012 return 0;
8013}
8014
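/*
 * bnx2x_cnic_probe - fill in and return the cnic_eth_dev structure that
 * the CNIC driver uses to attach to this device (chip id, BAR mappings,
 * context table geometry and the driver callbacks above).
 */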
8015struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
8016{
8017 struct bnx2x *bp = netdev_priv(dev);
8018 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8019
8020 cp->drv_owner = THIS_MODULE;
8021 cp->chip_id = CHIP_ID(bp);
8022 cp->pdev = bp->pdev;
8023 cp->io_base = bp->regview;
8024 cp->io_base2 = bp->doorbells;
8025 cp->max_kwqe_pending = 8;
8026 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
8027 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
8028 cp->ctx_tbl_len = CNIC_ILT_LINES;
8029 cp->starting_cid = BCM_CNIC_CID_START;
8030 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
8031 cp->drv_ctl = bnx2x_drv_ctl;
8032 cp->drv_register_cnic = bnx2x_register_cnic;
8033 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
8034
8035 return cp;
8036}
8037EXPORT_SYMBOL(bnx2x_cnic_probe);
8038
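/*
 * Illustrative sketch (not part of the driver): a CNIC-side caller is
 * expected to obtain the cnic_eth_dev via bnx2x_cnic_probe() and then
 * register its ops through the drv_register_cnic callback filled in
 * above.  The function name below is hypothetical and is shown only to
 * clarify the calling convention.
 */
#if 0
static int example_cnic_attach(struct net_device *netdev,
			       struct cnic_ops *ops, void *data)
{
	struct cnic_eth_dev *ethdev = bnx2x_cnic_probe(netdev);

	if (!ethdev)
		return -ENODEV;

	/* hands ops/data to bnx2x_register_cnic() via the callback */
	return ethdev->drv_register_cnic(netdev, ops, data);
}
#endif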
8039#endif /* BCM_CNIC */
8040
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
new file mode 100644
index 00000000000..a1f3bf0cd63
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -0,0 +1,5364 @@
1/* bnx2x_reg.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * The register descriptions start with the register access type, followed
10 * by the size in bits. For example [RW 32]. The access types are:
11 * R - Read only
12 * RC - Clear on read
13 * RW - Read/Write
14 * ST - Statistics register (clear on read)
15 * W - Write only
16 * WB - Wide bus register - the size is over 32 bits and it should be
17 * read/written in consecutive 32-bit accesses
18 * WR - Write Clear (write 1 to clear the bit)
19 *
20 */
21
22
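/*
 * Illustrative sketch (not part of the original header): per the access
 * type convention above, a WB (wide bus) register wider than 32 bits is
 * accessed as consecutive 32-bit reads/writes at offset, offset + 4, ...
 * The helper below is a hypothetical example using the driver's REG_RD()
 * 32-bit read macro; the low/high ordering shown is illustrative only and
 * the function is not used anywhere in the driver.
 */
#if 0
static inline u64 example_reg_rd_wb64(struct bnx2x *bp, u32 offset)
{
	u64 lo = REG_RD(bp, offset);		/* first 32-bit access */
	u64 hi = REG_RD(bp, offset + 4);	/* second 32-bit access */

	return lo | (hi << 32);
}
#endif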
23/* [R 19] Interrupt register #0 read */
24#define BRB1_REG_BRB1_INT_STS 0x6011c
25/* [RW 4] Parity mask register #0 read/write */
26#define BRB1_REG_BRB1_PRTY_MASK 0x60138
27/* [R 4] Parity register #0 read */
28#define BRB1_REG_BRB1_PRTY_STS 0x6012c
29/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
30 address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
31 BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. */
32#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200
33/* [RW 10] The number of free blocks above which the High_llfc signal to
34 interface #n is de-asserted. */
35#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c
36/* [RW 10] The number of free blocks below which the High_llfc signal to
37 interface #n is asserted. */
38#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c
39/* [RW 23] LL RAM data. */
40#define BRB1_REG_LL_RAM 0x61000
41/* [RW 10] The number of free blocks above which the Low_llfc signal to
42 interface #n is de-asserted. */
43#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c
44/* [RW 10] The number of free blocks below which the Low_llfc signal to
45 interface #n is asserted. */
46#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c
47/* [R 24] The number of full blocks. */
48#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090
49/* [ST 32] The number of cycles that the write_full signal towards MAC #0
50 was asserted. */
51#define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8
52#define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc
53#define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8
54/* [ST 32] The number of cycles that the pause signal towards MAC #0 was
55 asserted. */
56#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8
57#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc
58/* [RW 10] Write client 0: De-assert pause threshold. */
59#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
60#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
61/* [RW 10] Write client 0: Assert pause threshold. */
62#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
63#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
64/* [R 24] The number of full blocks occupied by port. */
65#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
66/* [RW 1] Reset the design by software. */
67#define BRB1_REG_SOFT_RESET 0x600dc
68/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
69#define CCM_REG_CAM_OCCUP 0xd0188
70/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
71 acknowledge output is deasserted; all other signals are treated as usual;
72 if 1 - normal activity. */
73#define CCM_REG_CCM_CFC_IFEN 0xd003c
74/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
75 disregarded; valid is deasserted; all other signals are treated as usual;
76 if 1 - normal activity. */
77#define CCM_REG_CCM_CQM_IFEN 0xd000c
78/* [RW 1] If set the Q index; received from the QM is inserted to event ID.
79 Otherwise 0 is inserted. */
80#define CCM_REG_CCM_CQM_USE_Q 0xd00c0
81/* [RW 11] Interrupt mask register #0 read/write */
82#define CCM_REG_CCM_INT_MASK 0xd01e4
83/* [R 11] Interrupt register #0 read */
84#define CCM_REG_CCM_INT_STS 0xd01d8
85/* [R 27] Parity register #0 read */
86#define CCM_REG_CCM_PRTY_STS 0xd01e8
87/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
88 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
89 Is used to determine the number of the AG context REG-pairs written back;
90 when the input message Reg1WbFlg isn't set. */
91#define CCM_REG_CCM_REG0_SZ 0xd00c4
92/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
93 disregarded; valid is deasserted; all other signals are treated as usual;
94 if 1 - normal activity. */
95#define CCM_REG_CCM_STORM0_IFEN 0xd0004
96/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
97 disregarded; valid is deasserted; all other signals are treated as usual;
98 if 1 - normal activity. */
99#define CCM_REG_CCM_STORM1_IFEN 0xd0008
100/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
101 disregarded; valid output is deasserted; all other signals are treated as
102 usual; if 1 - normal activity. */
103#define CCM_REG_CDU_AG_RD_IFEN 0xd0030
104/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
105 are disregarded; all other signals are treated as usual; if 1 - normal
106 activity. */
107#define CCM_REG_CDU_AG_WR_IFEN 0xd002c
108/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
109 disregarded; valid output is deasserted; all other signals are treated as
110 usual; if 1 - normal activity. */
111#define CCM_REG_CDU_SM_RD_IFEN 0xd0038
112/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
113 input is disregarded; all other signals are treated as usual; if 1 -
114 normal activity. */
115#define CCM_REG_CDU_SM_WR_IFEN 0xd0034
116/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes
117 the initial credit value; read returns the current value of the credit
118 counter. Must be initialized to 1 at start-up. */
119#define CCM_REG_CFC_INIT_CRD 0xd0204
120/* [RW 2] Auxiliary counter flag Q number 1. */
121#define CCM_REG_CNT_AUX1_Q 0xd00c8
122/* [RW 2] Auxiliary counter flag Q number 2. */
123#define CCM_REG_CNT_AUX2_Q 0xd00cc
124/* [RW 28] The CM header value for QM request (primary). */
125#define CCM_REG_CQM_CCM_HDR_P 0xd008c
126/* [RW 28] The CM header value for QM request (secondary). */
127#define CCM_REG_CQM_CCM_HDR_S 0xd0090
128/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
129 acknowledge output is deasserted; all other signals are treated as usual;
130 if 1 - normal activity. */
131#define CCM_REG_CQM_CCM_IFEN 0xd0014
132/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
133 the initial credit value; read returns the current value of the credit
134 counter. Must be initialized to 32 at start-up. */
135#define CCM_REG_CQM_INIT_CRD 0xd020c
136/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
137 stands for weight 8 (the most prioritised); 1 stands for weight 1(least
138 prioritised); 2 stands for weight 2; etc. */
139#define CCM_REG_CQM_P_WEIGHT 0xd00b8
140/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
141 stands for weight 8 (the most prioritised); 1 stands for weight 1(least
142 prioritised); 2 stands for weight 2; etc. */
143#define CCM_REG_CQM_S_WEIGHT 0xd00bc
144/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
145 acknowledge output is deasserted; all other signals are treated as usual;
146 if 1 - normal activity. */
147#define CCM_REG_CSDM_IFEN 0xd0018
148/* [RC 1] Set when the message length mismatch (relative to last indication)
149 at the SDM interface is detected. */
150#define CCM_REG_CSDM_LENGTH_MIS 0xd0170
151/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
152 weight 8 (the most prioritised); 1 stands for weight 1(least
153 prioritised); 2 stands for weight 2; etc. */
154#define CCM_REG_CSDM_WEIGHT 0xd00b4
155/* [RW 28] The CM header for QM formatting in case of an error in the QM
156 inputs. */
157#define CCM_REG_ERR_CCM_HDR 0xd0094
158/* [RW 8] The Event ID in case the input message ErrorFlg is set. */
159#define CCM_REG_ERR_EVNT_ID 0xd0098
160/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
161 writes the initial credit value; read returns the current value of the
162 credit counter. Must be initialized to 64 at start-up. */
163#define CCM_REG_FIC0_INIT_CRD 0xd0210
164/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write
165 writes the initial credit value; read returns the current value of the
166 credit counter. Must be initialized to 64 at start-up. */
167#define CCM_REG_FIC1_INIT_CRD 0xd0214
168/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
169 - strict priority defined by ~ccm_registers_gr_ag_pr.gr_ag_pr;
170 ~ccm_registers_gr_ld0_pr.gr_ld0_pr and
171 ~ccm_registers_gr_ld1_pr.gr_ld1_pr. Groups are according to channels and
172 outputs to STORM: aggregation; load FIC0; load FIC1 and store. */
173#define CCM_REG_GR_ARB_TYPE 0xd015c
174/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
175 highest priority is 3. It is supposed; that the Store channel priority is
176 the complement to 4 of the rest priorities - Aggregation channel; Load
177 (FIC0) channel and Load (FIC1). */
178#define CCM_REG_GR_LD0_PR 0xd0164
179/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
180 highest priority is 3. It is supposed; that the Store channel priority is
181 the complement to 4 of the rest priorities - Aggregation channel; Load
182 (FIC0) channel and Load (FIC1). */
183#define CCM_REG_GR_LD1_PR 0xd0168
184/* [RW 2] General flags index. */
185#define CCM_REG_INV_DONE_Q 0xd0108
186/* [RW 4] The number of double REG-pairs(128 bits); loaded from the STORM
187 context and sent to STORM; for a specific connection type. The double
188 REG-pairs are used in order to align to STORM context row size of 128
189 bits. The offset of these data in the STORM context is always 0. Index
190 _(0..15) stands for the connection type (one of 16). */
191#define CCM_REG_N_SM_CTX_LD_0 0xd004c
192#define CCM_REG_N_SM_CTX_LD_1 0xd0050
193#define CCM_REG_N_SM_CTX_LD_2 0xd0054
194#define CCM_REG_N_SM_CTX_LD_3 0xd0058
195#define CCM_REG_N_SM_CTX_LD_4 0xd005c
196/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
197 acknowledge output is deasserted; all other signals are treated as usual;
198 if 1 - normal activity. */
199#define CCM_REG_PBF_IFEN 0xd0028
200/* [RC 1] Set when the message length mismatch (relative to last indication)
201 at the pbf interface is detected. */
202#define CCM_REG_PBF_LENGTH_MIS 0xd0180
203/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
204 weight 8 (the most prioritised); 1 stands for weight 1(least
205 prioritised); 2 stands for weight 2; etc. */
206#define CCM_REG_PBF_WEIGHT 0xd00ac
207#define CCM_REG_PHYS_QNUM1_0 0xd0134
208#define CCM_REG_PHYS_QNUM1_1 0xd0138
209#define CCM_REG_PHYS_QNUM2_0 0xd013c
210#define CCM_REG_PHYS_QNUM2_1 0xd0140
211#define CCM_REG_PHYS_QNUM3_0 0xd0144
212#define CCM_REG_PHYS_QNUM3_1 0xd0148
213#define CCM_REG_QOS_PHYS_QNUM0_0 0xd0114
214#define CCM_REG_QOS_PHYS_QNUM0_1 0xd0118
215#define CCM_REG_QOS_PHYS_QNUM1_0 0xd011c
216#define CCM_REG_QOS_PHYS_QNUM1_1 0xd0120
217#define CCM_REG_QOS_PHYS_QNUM2_0 0xd0124
218#define CCM_REG_QOS_PHYS_QNUM2_1 0xd0128
219#define CCM_REG_QOS_PHYS_QNUM3_0 0xd012c
220#define CCM_REG_QOS_PHYS_QNUM3_1 0xd0130
221/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
222 disregarded; acknowledge output is deasserted; all other signals are
223 treated as usual; if 1 - normal activity. */
224#define CCM_REG_STORM_CCM_IFEN 0xd0010
225/* [RC 1] Set when the message length mismatch (relative to last indication)
226 at the STORM interface is detected. */
227#define CCM_REG_STORM_LENGTH_MIS 0xd016c
228/* [RW 3] The weight of the STORM input in the WRR (Weighted Round robin)
229 mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for
230 weight 1(least prioritised); 2 stands for weight 2 (more prioritised);
231 etc. */
232#define CCM_REG_STORM_WEIGHT 0xd009c
233/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
234 disregarded; acknowledge output is deasserted; all other signals are
235 treated as usual; if 1 - normal activity. */
236#define CCM_REG_TSEM_IFEN 0xd001c
237/* [RC 1] Set when the message length mismatch (relative to last indication)
238 at the tsem interface is detected. */
239#define CCM_REG_TSEM_LENGTH_MIS 0xd0174
240/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
241 weight 8 (the most prioritised); 1 stands for weight 1(least
242 prioritised); 2 stands for weight 2; etc. */
243#define CCM_REG_TSEM_WEIGHT 0xd00a0
244/* [RW 1] Input usem Interface enable. If 0 - the valid input is
245 disregarded; acknowledge output is deasserted; all other signals are
246 treated as usual; if 1 - normal activity. */
247#define CCM_REG_USEM_IFEN 0xd0024
248/* [RC 1] Set when message length mismatch (relative to last indication) at
249 the usem interface is detected. */
250#define CCM_REG_USEM_LENGTH_MIS 0xd017c
251/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
252 weight 8 (the most prioritised); 1 stands for weight 1(least
253 prioritised); 2 stands for weight 2; etc. */
254#define CCM_REG_USEM_WEIGHT 0xd00a8
255/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
256 disregarded; acknowledge output is deasserted; all other signals are
257 treated as usual; if 1 - normal activity. */
258#define CCM_REG_XSEM_IFEN 0xd0020
259/* [RC 1] Set when the message length mismatch (relative to last indication)
260 at the xsem interface is detected. */
261#define CCM_REG_XSEM_LENGTH_MIS 0xd0178
262/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
263 weight 8 (the most prioritised); 1 stands for weight 1(least
264 prioritised); 2 stands for weight 2; etc. */
265#define CCM_REG_XSEM_WEIGHT 0xd00a4
266/* [RW 19] Indirect access to the descriptor table of the XX protection
267 mechanism. The fields are: [5:0] - message length; [12:6] - message
268 pointer; [18:13] - next pointer. */
269#define CCM_REG_XX_DESCR_TABLE 0xd0300
270#define CCM_REG_XX_DESCR_TABLE_SIZE 36
271/* [R 7] Used to read the value of XX protection Free counter. */
272#define CCM_REG_XX_FREE 0xd0184
273/* [RW 6] Initial value for the credit counter; responsible for fulfilling
274 of the Input Stage XX protection buffer by the XX protection pending
275 messages. Max credit available - 127. Write writes the initial credit
276 value; read returns the current value of the credit counter. Must be
277 initialized to maximum XX protected message size - 2 at start-up. */
278#define CCM_REG_XX_INIT_CRD 0xd0220
279/* [RW 7] The maximum number of pending messages; which may be stored in XX
280 protection. At read the ~ccm_registers_xx_free.xx_free counter is read.
281 At write comprises the start value of the ~ccm_registers_xx_free.xx_free
282 counter. */
283#define CCM_REG_XX_MSG_NUM 0xd0224
284/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
285#define CCM_REG_XX_OVFL_EVNT_ID 0xd0044
286/* [RW 18] Indirect access to the XX table of the XX protection mechanism.
287 The fields are: [5:0] - tail pointer; [11:6] - Link List size; [17:12] -
288 header pointer. */
289#define CCM_REG_XX_TABLE 0xd0280
290#define CDU_REG_CDU_CHK_MASK0 0x101000
291#define CDU_REG_CDU_CHK_MASK1 0x101004
292#define CDU_REG_CDU_CONTROL0 0x101008
293#define CDU_REG_CDU_DEBUG 0x101010
294#define CDU_REG_CDU_GLOBAL_PARAMS 0x101020
295/* [RW 7] Interrupt mask register #0 read/write */
296#define CDU_REG_CDU_INT_MASK 0x10103c
297/* [R 7] Interrupt register #0 read */
298#define CDU_REG_CDU_INT_STS 0x101030
299/* [RW 5] Parity mask register #0 read/write */
300#define CDU_REG_CDU_PRTY_MASK 0x10104c
301/* [R 5] Parity register #0 read */
302#define CDU_REG_CDU_PRTY_STS 0x101040
303/* [RC 32] logging of error data in case of a CDU load error:
304 {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0]; active_error;
305 type_error; actual_active; actual_compressed_context}; */
306#define CDU_REG_ERROR_DATA 0x101014
307/* [WB 216] L1TT ram access. each entry has the following format :
308 {merge_regions[7:0]; offset12[5:0]...offset0[5:0];
309 length12[5:0]...length0[5:0]; id12[3:0]...id0[3:0]} */
310#define CDU_REG_L1TT 0x101800
311/* [WB 24] MATT ram access. each entry has the following
312 format:{RegionLength[11:0]; RegionOffset[11:0]} */
313#define CDU_REG_MATT 0x101100
314/* [RW 1] when this bit is set the CDU operates in e1hmf mode */
315#define CDU_REG_MF_MODE 0x101050
316/* [R 1] indication that the initialization of the activity counter by the hardware
317 was done. */
318#define CFC_REG_AC_INIT_DONE 0x104078
319/* [RW 13] activity counter ram access */
320#define CFC_REG_ACTIVITY_COUNTER 0x104400
321#define CFC_REG_ACTIVITY_COUNTER_SIZE 256
322/* [R 1] indication that the initialization of the cams by the hardware was done. */
323#define CFC_REG_CAM_INIT_DONE 0x10407c
324/* [RW 2] Interrupt mask register #0 read/write */
325#define CFC_REG_CFC_INT_MASK 0x104108
326/* [R 2] Interrupt register #0 read */
327#define CFC_REG_CFC_INT_STS 0x1040fc
328/* [RC 2] Interrupt register #0 read clear */
329#define CFC_REG_CFC_INT_STS_CLR 0x104100
330/* [RW 4] Parity mask register #0 read/write */
331#define CFC_REG_CFC_PRTY_MASK 0x104118
332/* [R 4] Parity register #0 read */
333#define CFC_REG_CFC_PRTY_STS 0x10410c
334/* [RW 21] CID cam access (21:1 - Data; valid - 0) */
335#define CFC_REG_CID_CAM 0x104800
336#define CFC_REG_CONTROL0 0x104028
337#define CFC_REG_DEBUG0 0x104050
338/* [RW 14] indicates per error (in #cfc_registers_cfc_error_vector.cfc_error
339 vector) whether the cfc should be disabled upon it */
340#define CFC_REG_DISABLE_ON_ERROR 0x104044
341/* [RC 14] CFC error vector. when the CFC detects an internal error it will
342 set one of these bits. the bit description can be found in CFC
343 specifications */
344#define CFC_REG_ERROR_VECTOR 0x10403c
345/* [WB 93] LCID info ram access */
346#define CFC_REG_INFO_RAM 0x105000
347#define CFC_REG_INFO_RAM_SIZE 1024
348#define CFC_REG_INIT_REG 0x10404c
349#define CFC_REG_INTERFACES 0x104058
350/* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. this
351 field allows changing the priorities of the weighted-round-robin arbiter
352 which selects which CFC load client should be served next */
353#define CFC_REG_LCREQ_WEIGHTS 0x104084
354/* [RW 16] Link List ram access; data = {prev_lcid; next_lcid} */
355#define CFC_REG_LINK_LIST 0x104c00
356#define CFC_REG_LINK_LIST_SIZE 256
357/* [R 1] indication that the initialization of the link list by the hardware was done. */
358#define CFC_REG_LL_INIT_DONE 0x104074
359/* [R 9] Number of allocated LCIDs which are at empty state */
360#define CFC_REG_NUM_LCIDS_ALLOC 0x104020
361/* [R 9] Number of Arriving LCIDs in Link List Block */
362#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
363/* [R 9] Number of Leaving LCIDs in Link List Block */
364#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
365/* [RW 8] The event id for aggregated interrupt 0 */
366#define CSDM_REG_AGG_INT_EVENT_0 0xc2038
367#define CSDM_REG_AGG_INT_EVENT_10 0xc2060
368#define CSDM_REG_AGG_INT_EVENT_11 0xc2064
369#define CSDM_REG_AGG_INT_EVENT_12 0xc2068
370#define CSDM_REG_AGG_INT_EVENT_13 0xc206c
371#define CSDM_REG_AGG_INT_EVENT_14 0xc2070
372#define CSDM_REG_AGG_INT_EVENT_15 0xc2074
373#define CSDM_REG_AGG_INT_EVENT_16 0xc2078
374#define CSDM_REG_AGG_INT_EVENT_2 0xc2040
375#define CSDM_REG_AGG_INT_EVENT_3 0xc2044
376#define CSDM_REG_AGG_INT_EVENT_4 0xc2048
377#define CSDM_REG_AGG_INT_EVENT_5 0xc204c
378#define CSDM_REG_AGG_INT_EVENT_6 0xc2050
379#define CSDM_REG_AGG_INT_EVENT_7 0xc2054
380#define CSDM_REG_AGG_INT_EVENT_8 0xc2058
381#define CSDM_REG_AGG_INT_EVENT_9 0xc205c
382/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
383 or auto-mask-mode (1) */
384#define CSDM_REG_AGG_INT_MODE_10 0xc21e0
385#define CSDM_REG_AGG_INT_MODE_11 0xc21e4
386#define CSDM_REG_AGG_INT_MODE_12 0xc21e8
387#define CSDM_REG_AGG_INT_MODE_13 0xc21ec
388#define CSDM_REG_AGG_INT_MODE_14 0xc21f0
389#define CSDM_REG_AGG_INT_MODE_15 0xc21f4
390#define CSDM_REG_AGG_INT_MODE_16 0xc21f8
391#define CSDM_REG_AGG_INT_MODE_6 0xc21d0
392#define CSDM_REG_AGG_INT_MODE_7 0xc21d4
393#define CSDM_REG_AGG_INT_MODE_8 0xc21d8
394#define CSDM_REG_AGG_INT_MODE_9 0xc21dc
395/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
396#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008
397/* [RW 16] The maximum value of the completion counter #0 */
398#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c
399/* [RW 16] The maximum value of the completion counter #1 */
400#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020
401/* [RW 16] The maximum value of the completion counter #2 */
402#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024
403/* [RW 16] The maximum value of the completion counter #3 */
404#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028
405/* [RW 13] The start address in the internal RAM for the completion
406 counters. */
407#define CSDM_REG_CMP_COUNTER_START_ADDR 0xc200c
408/* [RW 32] Interrupt mask register #0 read/write */
409#define CSDM_REG_CSDM_INT_MASK_0 0xc229c
410#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac
411/* [R 32] Interrupt register #0 read */
412#define CSDM_REG_CSDM_INT_STS_0 0xc2290
413#define CSDM_REG_CSDM_INT_STS_1 0xc22a0
414/* [RW 11] Parity mask register #0 read/write */
415#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
416/* [R 11] Parity register #0 read */
417#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
418#define CSDM_REG_ENABLE_IN1 0xc2238
419#define CSDM_REG_ENABLE_IN2 0xc223c
420#define CSDM_REG_ENABLE_OUT1 0xc2240
421#define CSDM_REG_ENABLE_OUT2 0xc2244
422/* [RW 4] The initial number of messages that can be sent to the pxp control
423 interface without receiving any ACK. */
424#define CSDM_REG_INIT_CREDIT_PXP_CTRL 0xc24bc
425/* [ST 32] The number of ACK after placement messages received */
426#define CSDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc227c
427/* [ST 32] The number of packet end messages received from the parser */
428#define CSDM_REG_NUM_OF_PKT_END_MSG 0xc2274
429/* [ST 32] The number of requests received from the pxp async if */
430#define CSDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc2278
431/* [ST 32] The number of commands received in queue 0 */
432#define CSDM_REG_NUM_OF_Q0_CMD 0xc2248
433/* [ST 32] The number of commands received in queue 10 */
434#define CSDM_REG_NUM_OF_Q10_CMD 0xc226c
435/* [ST 32] The number of commands received in queue 11 */
436#define CSDM_REG_NUM_OF_Q11_CMD 0xc2270
437/* [ST 32] The number of commands received in queue 1 */
438#define CSDM_REG_NUM_OF_Q1_CMD 0xc224c
439/* [ST 32] The number of commands received in queue 3 */
440#define CSDM_REG_NUM_OF_Q3_CMD 0xc2250
441/* [ST 32] The number of commands received in queue 4 */
442#define CSDM_REG_NUM_OF_Q4_CMD 0xc2254
443/* [ST 32] The number of commands received in queue 5 */
444#define CSDM_REG_NUM_OF_Q5_CMD 0xc2258
445/* [ST 32] The number of commands received in queue 6 */
446#define CSDM_REG_NUM_OF_Q6_CMD 0xc225c
447/* [ST 32] The number of commands received in queue 7 */
448#define CSDM_REG_NUM_OF_Q7_CMD 0xc2260
449/* [ST 32] The number of commands received in queue 8 */
450#define CSDM_REG_NUM_OF_Q8_CMD 0xc2264
451/* [ST 32] The number of commands received in queue 9 */
452#define CSDM_REG_NUM_OF_Q9_CMD 0xc2268
453/* [RW 13] The start address in the internal RAM for queue counters */
454#define CSDM_REG_Q_COUNTER_START_ADDR 0xc2010
455/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
456#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc2548
457/* [R 1] parser fifo empty in sdm_sync block */
458#define CSDM_REG_SYNC_PARSER_EMPTY 0xc2550
459/* [R 1] parser serial fifo empty in sdm_sync block */
460#define CSDM_REG_SYNC_SYNC_EMPTY 0xc2558
461/* [RW 32] Tick for timer counter. Applicable only when
462 ~csdm_registers_timer_tick_enable.timer_tick_enable =1 */
463#define CSDM_REG_TIMER_TICK 0xc2000
464/* [RW 5] The number of time_slots in the arbitration cycle */
465#define CSEM_REG_ARB_CYCLE_SIZE 0x200034
466/* [RW 3] The source that is associated with arbitration element 0. Source
467 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
468 sleeping thread with priority 1; 4- sleeping thread with priority 2 */
469#define CSEM_REG_ARB_ELEMENT0 0x200020
470/* [RW 3] The source that is associated with arbitration element 1. Source
471 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
472 sleeping thread with priority 1; 4- sleeping thread with priority 2.
473 Could not be equal to register ~csem_registers_arb_element0.arb_element0 */
474#define CSEM_REG_ARB_ELEMENT1 0x200024
475/* [RW 3] The source that is associated with arbitration element 2. Source
476 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
477 sleeping thread with priority 1; 4- sleeping thread with priority 2.
478 Could not be equal to register ~csem_registers_arb_element0.arb_element0
479 and ~csem_registers_arb_element1.arb_element1 */
480#define CSEM_REG_ARB_ELEMENT2 0x200028
481/* [RW 3] The source that is associated with arbitration element 3. Source
482 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
483 sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
484 not be equal to register ~csem_registers_arb_element0.arb_element0 and
485 ~csem_registers_arb_element1.arb_element1 and
486 ~csem_registers_arb_element2.arb_element2 */
487#define CSEM_REG_ARB_ELEMENT3 0x20002c
488/* [RW 3] The source that is associated with arbitration element 4. Source
489 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
490 sleeping thread with priority 1; 4- sleeping thread with priority 2.
491 Could not be equal to register ~csem_registers_arb_element0.arb_element0
492 and ~csem_registers_arb_element1.arb_element1 and
493 ~csem_registers_arb_element2.arb_element2 and
494 ~csem_registers_arb_element3.arb_element3 */
495#define CSEM_REG_ARB_ELEMENT4 0x200030
496/* [RW 32] Interrupt mask register #0 read/write */
497#define CSEM_REG_CSEM_INT_MASK_0 0x200110
498#define CSEM_REG_CSEM_INT_MASK_1 0x200120
499/* [R 32] Interrupt register #0 read */
500#define CSEM_REG_CSEM_INT_STS_0 0x200104
501#define CSEM_REG_CSEM_INT_STS_1 0x200114
502/* [RW 32] Parity mask register #0 read/write */
503#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130
504#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140
505/* [R 32] Parity register #0 read */
506#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
507#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
508#define CSEM_REG_ENABLE_IN 0x2000a4
509#define CSEM_REG_ENABLE_OUT 0x2000a8
510/* [RW 32] This address space contains all registers and memories that are
511 placed in SEM_FAST block. The SEM_FAST registers are described in
512 appendix B. In order to access the sem_fast registers the base address
513 ~fast_memory.fast_memory should be added to each sem_fast register offset. */
514#define CSEM_REG_FAST_MEMORY 0x220000
515/* [RW 1] Disables input messages from FIC0 May be updated during run_time
516 by the microcode */
517#define CSEM_REG_FIC0_DISABLE 0x200224
518/* [RW 1] Disables input messages from FIC1 May be updated during run_time
519 by the microcode */
520#define CSEM_REG_FIC1_DISABLE 0x200234
521/* [RW 15] Interrupt table. Read and write access to it is not possible in
522 the middle of the work */
523#define CSEM_REG_INT_TABLE 0x200400
524/* [ST 24] Statistics register. The number of messages that entered through
525 FIC0 */
526#define CSEM_REG_MSG_NUM_FIC0 0x200000
527/* [ST 24] Statistics register. The number of messages that entered through
528 FIC1 */
529#define CSEM_REG_MSG_NUM_FIC1 0x200004
530/* [ST 24] Statistics register. The number of messages that were sent to
531 FOC0 */
532#define CSEM_REG_MSG_NUM_FOC0 0x200008
533/* [ST 24] Statistics register. The number of messages that were sent to
534 FOC1 */
535#define CSEM_REG_MSG_NUM_FOC1 0x20000c
536/* [ST 24] Statistics register. The number of messages that were sent to
537 FOC2 */
538#define CSEM_REG_MSG_NUM_FOC2 0x200010
539/* [ST 24] Statistics register. The number of messages that were sent to
540 FOC3 */
541#define CSEM_REG_MSG_NUM_FOC3 0x200014
542/* [RW 1] Disables input messages from the passive buffer May be updated
543 during run_time by the microcode */
544#define CSEM_REG_PAS_DISABLE 0x20024c
545/* [WB 128] Debug only. Passive buffer memory */
546#define CSEM_REG_PASSIVE_BUFFER 0x202000
547/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
548#define CSEM_REG_PRAM 0x240000
549/* [R 16] Valid sleeping threads indication have bit per thread */
550#define CSEM_REG_SLEEP_THREADS_VALID 0x20026c
551/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
552#define CSEM_REG_SLOW_EXT_STORE_EMPTY 0x2002a0
553/* [RW 16] List of free threads . There is a bit per thread. */
554#define CSEM_REG_THREADS_LIST 0x2002e4
555/* [RW 3] The arbitration scheme of time_slot 0 */
556#define CSEM_REG_TS_0_AS 0x200038
557/* [RW 3] The arbitration scheme of time_slot 10 */
558#define CSEM_REG_TS_10_AS 0x200060
559/* [RW 3] The arbitration scheme of time_slot 11 */
560#define CSEM_REG_TS_11_AS 0x200064
561/* [RW 3] The arbitration scheme of time_slot 12 */
562#define CSEM_REG_TS_12_AS 0x200068
563/* [RW 3] The arbitration scheme of time_slot 13 */
564#define CSEM_REG_TS_13_AS 0x20006c
565/* [RW 3] The arbitration scheme of time_slot 14 */
566#define CSEM_REG_TS_14_AS 0x200070
567/* [RW 3] The arbitration scheme of time_slot 15 */
568#define CSEM_REG_TS_15_AS 0x200074
569/* [RW 3] The arbitration scheme of time_slot 16 */
570#define CSEM_REG_TS_16_AS 0x200078
571/* [RW 3] The arbitration scheme of time_slot 17 */
572#define CSEM_REG_TS_17_AS 0x20007c
573/* [RW 3] The arbitration scheme of time_slot 18 */
574#define CSEM_REG_TS_18_AS 0x200080
575/* [RW 3] The arbitration scheme of time_slot 1 */
576#define CSEM_REG_TS_1_AS 0x20003c
577/* [RW 3] The arbitration scheme of time_slot 2 */
578#define CSEM_REG_TS_2_AS 0x200040
579/* [RW 3] The arbitration scheme of time_slot 3 */
580#define CSEM_REG_TS_3_AS 0x200044
581/* [RW 3] The arbitration scheme of time_slot 4 */
582#define CSEM_REG_TS_4_AS 0x200048
583/* [RW 3] The arbitration scheme of time_slot 5 */
584#define CSEM_REG_TS_5_AS 0x20004c
585/* [RW 3] The arbitration scheme of time_slot 6 */
586#define CSEM_REG_TS_6_AS 0x200050
587/* [RW 3] The arbitration scheme of time_slot 7 */
588#define CSEM_REG_TS_7_AS 0x200054
589/* [RW 3] The arbitration scheme of time_slot 8 */
590#define CSEM_REG_TS_8_AS 0x200058
591/* [RW 3] The arbitration scheme of time_slot 9 */
592#define CSEM_REG_TS_9_AS 0x20005c
593/* [RW 1] Parity mask register #0 read/write */
594#define DBG_REG_DBG_PRTY_MASK 0xc0a8
595/* [R 1] Parity register #0 read */
596#define DBG_REG_DBG_PRTY_STS 0xc09c
597/* [RW 32] Commands memory. The address of command X; row Y is calculated
598 as 14*X+Y. */
599#define DMAE_REG_CMD_MEM 0x102400
600#define DMAE_REG_CMD_MEM_SIZE 224
601/* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c
602 initial value is all ones. */
603#define DMAE_REG_CRC16C_INIT 0x10201c
604/* [RW 1] If 0 - the CRC-16 T10 initial value is all zeroes; if 1 - the
605 CRC-16 T10 initial value is all ones. */
606#define DMAE_REG_CRC16T10_INIT 0x102020
607/* [RW 2] Interrupt mask register #0 read/write */
608#define DMAE_REG_DMAE_INT_MASK 0x102054
609/* [RW 4] Parity mask register #0 read/write */
610#define DMAE_REG_DMAE_PRTY_MASK 0x102064
611/* [R 4] Parity register #0 read */
612#define DMAE_REG_DMAE_PRTY_STS 0x102058
613/* [RW 1] Command 0 go. */
614#define DMAE_REG_GO_C0 0x102080
615/* [RW 1] Command 1 go. */
616#define DMAE_REG_GO_C1 0x102084
617/* [RW 1] Command 10 go. */
618#define DMAE_REG_GO_C10 0x102088
619/* [RW 1] Command 11 go. */
620#define DMAE_REG_GO_C11 0x10208c
621/* [RW 1] Command 12 go. */
622#define DMAE_REG_GO_C12 0x102090
623/* [RW 1] Command 13 go. */
624#define DMAE_REG_GO_C13 0x102094
625/* [RW 1] Command 14 go. */
626#define DMAE_REG_GO_C14 0x102098
627/* [RW 1] Command 15 go. */
628#define DMAE_REG_GO_C15 0x10209c
629/* [RW 1] Command 2 go. */
630#define DMAE_REG_GO_C2 0x1020a0
631/* [RW 1] Command 3 go. */
632#define DMAE_REG_GO_C3 0x1020a4
633/* [RW 1] Command 4 go. */
634#define DMAE_REG_GO_C4 0x1020a8
635/* [RW 1] Command 5 go. */
636#define DMAE_REG_GO_C5 0x1020ac
637/* [RW 1] Command 6 go. */
638#define DMAE_REG_GO_C6 0x1020b0
639/* [RW 1] Command 7 go. */
640#define DMAE_REG_GO_C7 0x1020b4
641/* [RW 1] Command 8 go. */
642#define DMAE_REG_GO_C8 0x1020b8
643/* [RW 1] Command 9 go. */
644#define DMAE_REG_GO_C9 0x1020bc
645/* [RW 1] DMAE GRC Interface (Target; Master) enable. If 0 - the acknowledge
646 input is disregarded; valid is deasserted; all other signals are treated
647 as usual; if 1 - normal activity. */
648#define DMAE_REG_GRC_IFEN 0x102008
649/* [RW 1] DMAE PCI Interface (Request; Read; Write) enable. If 0 - the
650 acknowledge input is disregarded; valid is deasserted; full is asserted;
651 all other signals are treated as usual; if 1 - normal activity. */
652#define DMAE_REG_PCI_IFEN 0x102004
653/* [RW 4] DMAE- PCI Request Interface initial credit. Write writes the
654 initial value to the credit counter; related to the address. Read returns
655 the current value of the counter. */
656#define DMAE_REG_PXP_REQ_INIT_CRD 0x1020c0
657/* [RW 8] Aggregation command. */
658#define DORQ_REG_AGG_CMD0 0x170060
659/* [RW 8] Aggregation command. */
660#define DORQ_REG_AGG_CMD1 0x170064
661/* [RW 8] Aggregation command. */
662#define DORQ_REG_AGG_CMD2 0x170068
663/* [RW 8] Aggregation command. */
664#define DORQ_REG_AGG_CMD3 0x17006c
665/* [RW 28] UCM Header. */
666#define DORQ_REG_CMHEAD_RX 0x170050
667/* [RW 32] Doorbell address for RBC doorbells (function 0). */
668#define DORQ_REG_DB_ADDR0 0x17008c
669/* [RW 5] Interrupt mask register #0 read/write */
670#define DORQ_REG_DORQ_INT_MASK 0x170180
671/* [R 5] Interrupt register #0 read */
672#define DORQ_REG_DORQ_INT_STS 0x170174
673/* [RC 5] Interrupt register #0 read clear */
674#define DORQ_REG_DORQ_INT_STS_CLR 0x170178
675/* [RW 2] Parity mask register #0 read/write */
676#define DORQ_REG_DORQ_PRTY_MASK 0x170190
677/* [R 2] Parity register #0 read */
678#define DORQ_REG_DORQ_PRTY_STS 0x170184
679/* [RW 8] The address to write the DPM CID to STORM. */
680#define DORQ_REG_DPM_CID_ADDR 0x170044
681/* [RW 5] The DPM mode CID extraction offset. */
682#define DORQ_REG_DPM_CID_OFST 0x170030
683/* [RW 12] The threshold of the DQ FIFO to send the almost full interrupt. */
684#define DORQ_REG_DQ_FIFO_AFULL_TH 0x17007c
685/* [RW 12] The threshold of the DQ FIFO to send the full interrupt. */
686#define DORQ_REG_DQ_FIFO_FULL_TH 0x170078
687/* [R 13] Current value of the DQ FIFO fill level according to following
688 pointer. The range is 0 - 256 FIFO rows; where each row stands for the
689 doorbell. */
690#define DORQ_REG_DQ_FILL_LVLF 0x1700a4
691/* [R 1] DQ FIFO full status. Is set; when FIFO filling level is more or
692 equal to full threshold; reset on full clear. */
693#define DORQ_REG_DQ_FULL_ST 0x1700c0
694/* [RW 28] The value sent to CM header in the case of CFC load error. */
695#define DORQ_REG_ERR_CMHEAD 0x170058
696#define DORQ_REG_IF_EN 0x170004
697#define DORQ_REG_MODE_ACT 0x170008
698/* [RW 5] The normal mode CID extraction offset. */
699#define DORQ_REG_NORM_CID_OFST 0x17002c
700/* [RW 28] TCM Header when only TCP context is loaded. */
701#define DORQ_REG_NORM_CMHEAD_TX 0x17004c
702/* [RW 3] The number of simultaneous outstanding requests to Context Fetch
703 Interface. */
704#define DORQ_REG_OUTST_REQ 0x17003c
705#define DORQ_REG_REGN 0x170038
706/* [R 4] Current value of response A counter credit. Initial credit is
707 configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
708 register. */
709#define DORQ_REG_RSPA_CRD_CNT 0x1700ac
710/* [R 4] Current value of response B counter credit. Initial credit is
711 configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
712 register. */
713#define DORQ_REG_RSPB_CRD_CNT 0x1700b0
714/* [RW 4] The initial credit at the Doorbell Response Interface. The write
715 writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The
716 read reads this written value. */
717#define DORQ_REG_RSP_INIT_CRD 0x170048
718/* [RW 4] Initial activity counter value on the load request; when the
719 shortcut is done. */
720#define DORQ_REG_SHRT_ACT_CNT 0x170070
721/* [RW 28] TCM Header when both ULP and TCP context is loaded. */
722#define DORQ_REG_SHRT_CMHEAD 0x170054
723#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4)
724#define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3)
725#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7)
726#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2)
727#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
728#define HC_REG_AGG_INT_0 0x108050
729#define HC_REG_AGG_INT_1 0x108054
730#define HC_REG_ATTN_BIT 0x108120
731#define HC_REG_ATTN_IDX 0x108100
732#define HC_REG_ATTN_MSG0_ADDR_L 0x108018
733#define HC_REG_ATTN_MSG1_ADDR_L 0x108020
734#define HC_REG_ATTN_NUM_P0 0x108038
735#define HC_REG_ATTN_NUM_P1 0x10803c
736#define HC_REG_COMMAND_REG 0x108180
737#define HC_REG_CONFIG_0 0x108000
738#define HC_REG_CONFIG_1 0x108004
739#define HC_REG_FUNC_NUM_P0 0x1080ac
740#define HC_REG_FUNC_NUM_P1 0x1080b0
741/* [RW 3] Parity mask register #0 read/write */
742#define HC_REG_HC_PRTY_MASK 0x1080a0
743/* [R 3] Parity register #0 read */
744#define HC_REG_HC_PRTY_STS 0x108094
745#define HC_REG_INT_MASK 0x108108
746#define HC_REG_LEADING_EDGE_0 0x108040
747#define HC_REG_LEADING_EDGE_1 0x108048
748#define HC_REG_P0_PROD_CONS 0x108200
749#define HC_REG_P1_PROD_CONS 0x108400
750#define HC_REG_PBA_COMMAND 0x108140
751#define HC_REG_PCI_CONFIG_0 0x108010
752#define HC_REG_PCI_CONFIG_1 0x108014
753#define HC_REG_STATISTIC_COUNTERS 0x109000
754#define HC_REG_TRAILING_EDGE_0 0x108044
755#define HC_REG_TRAILING_EDGE_1 0x10804c
756#define HC_REG_UC_RAM_ADDR_0 0x108028
757#define HC_REG_UC_RAM_ADDR_1 0x108030
758#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068
759#define HC_REG_VQID_0 0x108008
760#define HC_REG_VQID_1 0x10800c
761#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424
762#define MCP_REG_MCPR_NVM_ADDR 0x8640c
763#define MCP_REG_MCPR_NVM_CFG4 0x8642c
764#define MCP_REG_MCPR_NVM_COMMAND 0x86400
765#define MCP_REG_MCPR_NVM_READ 0x86410
766#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
767#define MCP_REG_MCPR_NVM_WRITE 0x86408
768#define MCP_REG_MCPR_SCRATCH 0xa0000
769#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
770#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
771/* [R 32] read first 32 bit after inversion of function 0. mapped as
772 follows: [0] NIG attention for function0; [1] NIG attention for
773 function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
774 [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9]
775 GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE
776 glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0;
777 [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16]
778 MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB
779 Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw
780 interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity
781 error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw
782 interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] PBF
783 Parity error; [31] PBF Hw interrupt; */
784#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 0xa42c
785#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1 0xa430
786/* [R 32] read first 32 bit after inversion of mcp. mapped as follows: [0]
787 NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
788 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
789 [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
790 PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
791 function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
792 Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
793 mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
794 BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
795 Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
796 interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
797 Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
798 interrupt; */
799#define MISC_REG_AEU_AFTER_INVERT_1_MCP 0xa434
800/* [R 32] read second 32 bit after inversion of function 0. mapped as
801 follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
802 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
803 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
804 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
805 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
806 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
807 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
808 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
809 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
810 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
811 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
812 interrupt; */
813#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 0xa438
814#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1 0xa43c
815/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0]
816 PBClient Parity error; [1] PBClient Hw interrupt; [2] QM Parity error;
817 [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt;
818 [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9]
819 XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
820 DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
821 error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
822 PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
823 [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
824 [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
825 [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
826 [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
827#define MISC_REG_AEU_AFTER_INVERT_2_MCP 0xa440
828/* [R 32] read third 32 bit after inversion of function 0. mapped as
829 follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity
830 error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5]
831 PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
832 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
833 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
834 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
835 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
836 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
837 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
838 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
839 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
840 attn1; */
841#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 0xa444
842#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1 0xa448
843/* [R 32] read third 32 bit after inversion of mcp. mapped as follows: [0]
844 CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP
845 Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient
846 Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity
847 error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw
848 interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14]
849 MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17]
850 Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW
851 timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3
852 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1
853 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW
854 timers attn_4 func1; [30] General attn0; [31] General attn1; */
855#define MISC_REG_AEU_AFTER_INVERT_3_MCP 0xa44c
856/* [R 32] read fourth 32 bit after inversion of function 0. mapped as
857 follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
858 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
859 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
860 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
861 [14] General attn16; [15] General attn17; [16] General attn18; [17]
862 General attn19; [18] General attn20; [19] General attn21; [20] Main power
863 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
864 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
865 Latched timeout attention; [27] GRC Latched reserved access attention;
866 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
867 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
868#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 0xa450
869#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1 0xa454
870/* [R 32] read fourth 32 bit after inversion of mcp. mapped as follows: [0]
871 General attn2; [1] General attn3; [2] General attn4; [3] General attn5;
872 [4] General attn6; [5] General attn7; [6] General attn8; [7] General
873 attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11]
874 General attn13; [12] General attn14; [13] General attn15; [14] General
875 attn16; [15] General attn17; [16] General attn18; [17] General attn19;
876 [18] General attn20; [19] General attn21; [20] Main power interrupt; [21]
877 RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24]
878 RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout
879 attention; [27] GRC Latched reserved access attention; [28] MCP Latched
880 rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
881 ump_tx_parity; [31] MCP Latched scpad_parity; */
882#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458
 883/* [W 14] a write to this register clears the latched
884 signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in
885 d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP
886 latch; one in d5 clears GRC Latched timeout attention; one in d6 clears
887 GRC Latched reserved access attention; one in d7 clears Latched
888 rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears
889 Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both
890 ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears
 891 pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; a read
 892 from this register returns zero */
893#define MISC_REG_AEU_CLR_LATCH_SIGNAL 0xa45c
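/*
 * Usage sketch (editor's illustration, not part of the original register
 * map): clearing one of the latched attention signals described above is a
 * single write of the matching bit to MISC_REG_AEU_CLR_LATCH_SIGNAL. The
 * sketch assumes the REG_WR() accessor and struct bnx2x from bnx2x.h.
 */
#if 0	/* illustration only */
static inline void bnx2x_clr_grc_timeout_latch(struct bnx2x *bp)
{
	/* d5 clears the GRC Latched timeout attention (see comment above) */
	REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 1 << 5);
}
#endif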
894/* [RW 32] first 32b for enabling the output for function 0 output0. mapped
895 as follows: [0] NIG attention for function0; [1] NIG attention for
896 function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
897 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
898 GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
899 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
900 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
901 SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
902 indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
903 [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
904 SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
905 TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
906 TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
907#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 0xa06c
908#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 0xa07c
909#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 0xa08c
910#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3 0xa09c
911#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5 0xa0bc
912#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6 0xa0cc
913#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7 0xa0dc
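/*
 * Usage sketch (editor's illustration): the AEU enable registers above are
 * plain bit masks, so enabling a single attention source for function 0
 * output 0 is a read-modify-write of the mapped bit. Bit [15] (SPIO5) is
 * used as an example; REG_RD()/REG_WR() are assumed from bnx2x.h.
 */
#if 0	/* illustration only */
static inline void bnx2x_aeu_enable_spio5_func0(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	val |= (1 << 15);	/* [15] SPIO5 per the mapping above */
	REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
}
#endif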
914/* [RW 32] first 32b for enabling the output for function 1 output0. mapped
915 as follows: [0] NIG attention for function0; [1] NIG attention for
916 function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function
917 1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
918 GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
919 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
920 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
921 SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X
922 indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
923 [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
924 SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
925 TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
926 TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
927#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 0xa10c
928#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 0xa11c
929#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 0xa12c
930#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3 0xa13c
931#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5 0xa15c
932#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6 0xa16c
933#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7 0xa17c
934/* [RW 32] first 32b for enabling the output for close the gate nig. mapped
935 as follows: [0] NIG attention for function0; [1] NIG attention for
936 function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
937 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
938 GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
939 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
940 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
941 SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
942 indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
943 [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
944 SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
945 TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
946 TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
947#define MISC_REG_AEU_ENABLE1_NIG_0 0xa0ec
948#define MISC_REG_AEU_ENABLE1_NIG_1 0xa18c
949/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped
950 as follows: [0] NIG attention for function0; [1] NIG attention for
951 function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
952 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
953 GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
954 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
955 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
956 SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
957 indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
958 [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
959 SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
960 TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
961 TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
962#define MISC_REG_AEU_ENABLE1_PXP_0 0xa0fc
963#define MISC_REG_AEU_ENABLE1_PXP_1 0xa19c
964/* [RW 32] second 32b for enabling the output for function 0 output0. mapped
965 as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
966 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
967 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
968 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
969 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
970 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
971 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
972 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
973 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
974 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
975 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
976 interrupt; */
977#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0 0xa070
978#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1 0xa080
979/* [RW 32] second 32b for enabling the output for function 1 output0. mapped
980 as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
981 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
982 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
983 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
984 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
985 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
986 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
987 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
988 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
989 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
990 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
991 interrupt; */
992#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0 0xa110
993#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1 0xa120
994/* [RW 32] second 32b for enabling the output for close the gate nig. mapped
995 as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
996 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
997 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
998 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
999 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
1000 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
1001 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
1002 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
1003 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
1004 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
1005 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
1006 interrupt; */
1007#define MISC_REG_AEU_ENABLE2_NIG_0 0xa0f0
1008#define MISC_REG_AEU_ENABLE2_NIG_1 0xa190
1009/* [RW 32] second 32b for enabling the output for close the gate pxp. mapped
1010 as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
1011 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
1012 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
1013 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
1014 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
1015 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
1016 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
1017 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
1018 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
1019 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
1020 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
1021 interrupt; */
1022#define MISC_REG_AEU_ENABLE2_PXP_0 0xa100
1023#define MISC_REG_AEU_ENABLE2_PXP_1 0xa1a0
1024/* [RW 32] third 32b for enabling the output for function 0 output0. mapped
1025 as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
1026 Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
1027 [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
1028 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
1029 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
1030 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
1031 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
1032 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
1033 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
1034 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
1035 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
1036 attn1; */
1037#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0 0xa074
1038#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1 0xa084
1039/* [RW 32] third 32b for enabling the output for function 1 output0. mapped
1040 as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
1041 Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
1042 [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
1043 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
1044 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
1045 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
1046 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
1047 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
1048 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
1049 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
1050 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
1051 attn1; */
1052#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0 0xa114
1053#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1 0xa124
1054/* [RW 32] third 32b for enabling the output for close the gate nig. mapped
1055 as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
1056 Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
1057 [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
1058 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
1059 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
1060 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
1061 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
1062 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
1063 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
1064 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
1065 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
1066 attn1; */
1067#define MISC_REG_AEU_ENABLE3_NIG_0 0xa0f4
1068#define MISC_REG_AEU_ENABLE3_NIG_1 0xa194
1069/* [RW 32] third 32b for enabling the output for close the gate pxp. mapped
1070 as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
1071 Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
1072 [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
1073 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
1074 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
1075 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
1076 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
1077 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
1078 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
1079 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
1080 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
1081 attn1; */
1082#define MISC_REG_AEU_ENABLE3_PXP_0 0xa104
1083#define MISC_REG_AEU_ENABLE3_PXP_1 0xa1a4
1084/* [RW 32] fourth 32b for enabling the output for function 0 output0.mapped
1085 as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
1086 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
1087 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
1088 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
1089 [14] General attn16; [15] General attn17; [16] General attn18; [17]
1090 General attn19; [18] General attn20; [19] General attn21; [20] Main power
1091 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
1092 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
1093 Latched timeout attention; [27] GRC Latched reserved access attention;
1094 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
1095 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1096#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 0xa078
1097#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2 0xa098
1098#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4 0xa0b8
1099#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5 0xa0c8
1100#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6 0xa0d8
1101#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7 0xa0e8
1102/* [RW 32] fourth 32b for enabling the output for function 1 output0.mapped
1103 as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
1104 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
1105 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
1106 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
1107 [14] General attn16; [15] General attn17; [16] General attn18; [17]
1108 General attn19; [18] General attn20; [19] General attn21; [20] Main power
1109 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
1110 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
1111 Latched timeout attention; [27] GRC Latched reserved access attention;
1112 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
1113 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1114#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 0xa118
1115#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2 0xa138
1116#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4 0xa158
1117#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5 0xa168
1118#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6 0xa178
1119#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7 0xa188
1120/* [RW 32] fourth 32b for enabling the output for close the gate nig.mapped
1121 as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
1122 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
1123 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
1124 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
1125 [14] General attn16; [15] General attn17; [16] General attn18; [17]
1126 General attn19; [18] General attn20; [19] General attn21; [20] Main power
1127 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
1128 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
1129 Latched timeout attention; [27] GRC Latched reserved access attention;
1130 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
1131 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1132#define MISC_REG_AEU_ENABLE4_NIG_0 0xa0f8
1133#define MISC_REG_AEU_ENABLE4_NIG_1 0xa198
1134/* [RW 32] fourth 32b for enabling the output for close the gate pxp.mapped
1135 as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
1136 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
1137 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
1138 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
1139 [14] General attn16; [15] General attn17; [16] General attn18; [17]
1140 General attn19; [18] General attn20; [19] General attn21; [20] Main power
1141 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
1142 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
1143 Latched timeout attention; [27] GRC Latched reserved access attention;
1144 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
1145 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1146#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
1147#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
1148/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
1149 128 bit vector */
1150#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000
1151#define MISC_REG_AEU_GENERAL_ATTN_1 0xa004
1152#define MISC_REG_AEU_GENERAL_ATTN_10 0xa028
1153#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c
1154#define MISC_REG_AEU_GENERAL_ATTN_12 0xa030
1155#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008
1156#define MISC_REG_AEU_GENERAL_ATTN_3 0xa00c
1157#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010
1158#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014
1159#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018
1160#define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c
1161#define MISC_REG_AEU_GENERAL_ATTN_8 0xa020
1162#define MISC_REG_AEU_GENERAL_ATTN_9 0xa024
1163#define MISC_REG_AEU_GENERAL_MASK 0xa61c
1164/* [RW 32] first 32b for inverting the input for function 0; for each bit:
1165 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for
1166 function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp;
1167 [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; [7] GPIO2 function 1;
1168 [8] GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
1169 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
1170 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
1171 SPIO4; [15] SPIO5; [16] MSI/X indication for mcp; [17] MSI/X indication
1172 for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; [20] PRS
1173 Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] SRC Hw
1174 interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] TCM
1175 Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] TSEMI
1176 Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
1177#define MISC_REG_AEU_INVERTER_1_FUNC_0 0xa22c
1178#define MISC_REG_AEU_INVERTER_1_FUNC_1 0xa23c
1179/* [RW 32] second 32b for inverting the input for function 0; for each bit:
1180 0= do not invert; 1= invert. mapped as follows: [0] PBClient Parity
1181 error; [1] PBClient Hw interrupt; [2] QM Parity error; [3] QM Hw
1182 interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM
1183 Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw
1184 interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
1185 DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
1186 error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
1187 PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
1188 [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
1189 [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
1190 [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
1191 [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
1192#define MISC_REG_AEU_INVERTER_2_FUNC_0 0xa230
1193#define MISC_REG_AEU_INVERTER_2_FUNC_1 0xa240
1194/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0;
1195 [9:8] = reserved. Zero = mask; one = unmask */
1196#define MISC_REG_AEU_MASK_ATTN_FUNC_0 0xa060
1197#define MISC_REG_AEU_MASK_ATTN_FUNC_1 0xa064
1198/* [RW 1] If set a system kill occurred */
1199#define MISC_REG_AEU_SYS_KILL_OCCURRED 0xa610
1200/* [RW 32] Represent the status of the input vector to the AEU when a system
1201 kill occurred. The register is reset in por reset. Mapped as follows: [0]
1202 NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
1203 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
1204 [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
1205 PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
1206 function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
1207 Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
1208 mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
1209 BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
1210 Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
1211 interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
1212 Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
1213 interrupt; */
1214#define MISC_REG_AEU_SYS_KILL_STATUS_0 0xa600
1215#define MISC_REG_AEU_SYS_KILL_STATUS_1 0xa604
1216#define MISC_REG_AEU_SYS_KILL_STATUS_2 0xa608
1217#define MISC_REG_AEU_SYS_KILL_STATUS_3 0xa60c
1218/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
1219 Port. */
1220#define MISC_REG_BOND_ID 0xa400
1221/* [R 8] These bits indicate the metal revision of the chip. This value
1222 starts at 0x00 for each all-layer tape-out and increments by one for each
1223 tape-out. */
1224#define MISC_REG_CHIP_METAL 0xa404
1225/* [R 16] These bits indicate the part number for the chip. */
1226#define MISC_REG_CHIP_NUM 0xa408
1227/* [R 4] These bits indicate the base revision of the chip. This value
1228 starts at 0x0 for the A0 tape-out and increments by one for each
1229 all-layer tape-out. */
1230#define MISC_REG_CHIP_REV 0xa40c
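/*
 * Usage sketch (editor's illustration): the four read-only ID registers
 * above (BOND_ID/CHIP_METAL/CHIP_NUM/CHIP_REV) can be packed into a single
 * chip id word, e.g. num[31:16] | rev[15:12] | metal[11:4] | bond[3:0].
 * REG_RD() is assumed from bnx2x.h; the packing shown is illustrative.
 */
#if 0	/* illustration only */
static inline u32 bnx2x_read_chip_id(struct bnx2x *bp)
{
	u32 id;

	id  = (REG_RD(bp, MISC_REG_CHIP_NUM) & 0xffff) << 16;
	id |= (REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12;
	id |= (REG_RD(bp, MISC_REG_CHIP_METAL) & 0xff) << 4;
	id |= (REG_RD(bp, MISC_REG_BOND_ID) & 0xf);
	return id;
}
#endif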
1231/* [RW 32] The following driver registers(1...16) represent 16 drivers and
1232 32 clients. Each client can be controlled by one driver only. One in each
1233 bit represents that this driver controls the appropriate client (Ex: bit 5
1234 is set means this driver controls client number 5). addr1 = set; addr0 =
1235 clear; read from both addresses will give the same result = status. write
1236 to address 1 will set a request to control all the clients that their
1237 appropriate bit (in the write command) is set. if the client is free (the
1238 appropriate bit in all the other drivers is clear) one will be written to
1239 that driver register; if the client isn't free the bit will remain zero.
1240 if the appropriate bit is set (the driver requests control of a client it
1241 already controls); the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
1242 be asserted. a write to address 0 will set a request to
1243 free all the clients that their appropriate bit (in the write command) is
1244 set. if the appropriate bit is clear (the driver requests to free a client
1245 it doesn't control); the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
1246 be asserted. */
1247#define MISC_REG_DRIVER_CONTROL_1 0xa510
1248#define MISC_REG_DRIVER_CONTROL_7 0xa3c8
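/*
 * Usage sketch (editor's illustration) of the set/clear protocol described
 * above: writing the client mask to the register's "set" address (addr 1 =
 * base + 4) requests control, writing it to the "clear" address (addr 0 =
 * base) releases it, and a read back tells whether the request was granted.
 * REG_RD()/REG_WR() are assumed from bnx2x.h; the helper names are
 * hypothetical.
 */
#if 0	/* illustration only */
static inline bool bnx2x_driver_ctrl_acquire(struct bnx2x *bp, u32 client_mask)
{
	/* addr 1 = set: request control of the clients in client_mask */
	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + 4, client_mask);
	/* status read: a bit stays set only if that client was free */
	return (REG_RD(bp, MISC_REG_DRIVER_CONTROL_1) & client_mask) ==
	       client_mask;
}

static inline void bnx2x_driver_ctrl_release(struct bnx2x *bp, u32 client_mask)
{
	/* addr 0 = clear: free the clients in client_mask */
	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1, client_mask);
}
#endif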
1249/* [RW 1] e1hmf for WOL. If clr; the WOL signal to the PXP will be sent on
1250 bit 0 only. */
1251#define MISC_REG_E1HMF_MODE 0xa5f8
1252/* [RW 32] Debug only: spare RW register reset by core reset */
1253#define MISC_REG_GENERIC_CR_0 0xa460
1254/* [RW 32] Debug only: spare RW register reset by por reset */
1255#define MISC_REG_GENERIC_POR_1 0xa474
1256/* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of
1257 these bits is written as a '1'; the corresponding GPIO bit will turn off
1258 its drivers and become an input. This is the reset state of all GPIO
1259 pins. The read value of these bits will be a '1' if that last command
1260 (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff).
1261 [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written
1262 as a '1'; the corresponding GPIO bit will drive low. The read value of
1263 these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for
1264 this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET port
1265 0; When any of these bits is written as a '1'; the corresponding GPIO
1266 bit will drive high (if it has that capability). The read value of these
1267 bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
1268 bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE port 0;
1269 RO; These bits indicate the read value of each of the eight GPIO pins.
1270 This is the result value of the pin; not the drive value. Writing these
1271 bits will have no effect. */
1272#define MISC_REG_GPIO 0xa490
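/*
 * Usage sketch (editor's illustration) of the SET/CLR/FLOAT scheme above:
 * driving port-0 GPIO pin 'num' high is a write of the matching SET bit
 * ([11-8] for port 0); the CLR and FLOAT fields work the same way. REG_WR()
 * is assumed from bnx2x.h; no locking or read-modify-write is shown, and in
 * practice access to this shared register needs to be arbitrated.
 */
#if 0	/* illustration only */
static inline void bnx2x_gpio_set_high_port0(struct bnx2x *bp, int num)
{
	/* SET field for port 0 occupies bits [11-8] (one bit per pin) */
	REG_WR(bp, MISC_REG_GPIO, 1 << (8 + num));
}
#endif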
1273/* [RW 8] These bits enable the GPIO_INTs to signal events to the
1274 IGU/MCP according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2]
1275 p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2;
1276 [7] p1_gpio_3; */
1277#define MISC_REG_GPIO_EVENT_EN 0xa2bc
1278/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing a
1279 '1' to these bits clears the corresponding bit in the #OLD_VALUE register.
1280 This will acknowledge an interrupt on the falling edge of corresponding
1281 GPIO input (reset value 0). [23-20] OLD_SET port1; [19-16] OLD_SET port0;
1282 Writing a '1' to these bits sets the corresponding bit in the #OLD_VALUE
1283 register. This will acknowledge an interrupt on the rising edge of
1284 corresponding GPIO input (reset value 0). [15-12] OLD_VALUE port1; [11-8]
1285 OLD_VALUE port0; RO; These bits indicate the old value of the GPIO input
1286 value. When the ~INT_STATE bit is set; this bit indicates the OLD value
1287 of the pin such that if ~INT_STATE is set and this bit is '0'; then the
1288 interrupt is due to a low to high edge. If ~INT_STATE is set and this bit
1289 is '1'; then the interrupt is due to a high to low edge (reset value 0).
1290 [7-4] INT_STATE port1; [3-0] INT_STATE port0; RO; These bits indicate the
1291 current GPIO interrupt state for each GPIO pin. This bit is cleared when
1292 the appropriate #OLD_SET or #OLD_CLR command bit is written. This bit is
1293 set when the GPIO input does not match the current value in #OLD_VALUE
1294 (reset value 0). */
1295#define MISC_REG_GPIO_INT 0xa494
1296/* [R 28] this field holds the last information that caused reserved
1297 attention. bits [19:0] - address; [22:20] function; [23] reserved;
1298 [27:24] the master that caused the attention - according to the following
1299 encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
1300 dbu; 8 = dmae */
1301#define MISC_REG_GRC_RSV_ATTN 0xa3c0
1302/* [R 28] this field holds the last information that caused timeout
1303 attention. bits [19:0] - address; [22:20] function; [23] reserved;
1304 [27:24] the master that caused the attention - according to the following
1305 encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
1306 dbu; 8 = dmae */
1307#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
1308/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any
1309 access that does not finish within
1310 ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is
1311 cleared; this timeout is disabled. If this timeout occurs; the GRC shall
1312 assert its attention output. */
1313#define MISC_REG_GRC_TIMEOUT_EN 0xa280
1314/* [RW 28] 28 LSB of LCPLL first register; reset val = 521. inside order of
1315 the bits is: [2:0] OAC (reset value 001) CML output buffer bias control;
1316 111 for +40%; 011 for +20%; 001 for 0%; 000 for -20%. [5:3] Icp_ctrl
1317 (reset value 001) Charge pump current control; 111 for 720u; 011 for
1318 600u; 001 for 480u and 000 for 360u. [7:6] Bias_ctrl (reset value 00)
1319 Global bias control; When bit 7 is high bias current will be 10 0gh; When
1320 bit 6 is high bias will be 100w; Valid values are 00; 10; 01. [10:8]
1321 Pll_observe (reset value 010) Bits to control observability. bit 10 is
1322 for test bias; bit 9 is for test CK; bit 8 is test Vc. [12:11] Vth_ctrl
1323 (reset value 00) Comparator threshold control. 00 for 0.6V; 01 for 0.54V
1324 and 10 for 0.66V. [13] pllSeqStart (reset value 0) Enables VCO tuning
1325 sequencer: 1= sequencer disabled; 0= sequencer enabled (inverted
1326 internally). [14] reserved (reset value 0) Reset for VCO sequencer is
1327 connected to RESET input directly. [15] capRetry_en (reset value 0)
1328 enable retry on cap search failure (inverted). [16] freqMonitor_e (reset
1329 value 0) bit to continuously monitor vco freq (inverted). [17]
1330 freqDetRestart_en (reset value 0) bit to enable restart when not freq
1331 locked (inverted). [18] freqDetRetry_en (reset value 0) bit to enable
1332 retry on freq det failure(inverted). [19] pllForceFdone_en (reset value
1333 0) bit to enable pllForceFdone & pllForceFpass into pllSeq. [20]
1334 pllForceFdone (reset value 0) bit to force freqDone. [21] pllForceFpass
1335 (reset value 0) bit to force freqPass. [22] pllForceDone_en (reset value
1336 0) bit to enable pllForceCapDone. [23] pllForceCapDone (reset value 0)
1337 bit to force capDone. [24] pllForceCapPass_en (reset value 0) bit to
1338 enable pllForceCapPass. [25] pllForceCapPass (reset value 0) bit to force
1339 capPass. [26] capRestart (reset value 0) bit to force cap sequencer to
1340 restart. [27] capSelectM_en (reset value 0) bit to enable cap select
1341 register bits. */
1342#define MISC_REG_LCPLL_CTRL_1 0xa2a4
1343#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8
1344/* [RW 4] Interrupt mask register #0 read/write */
1345#define MISC_REG_MISC_INT_MASK 0xa388
1346/* [RW 1] Parity mask register #0 read/write */
1347#define MISC_REG_MISC_PRTY_MASK 0xa398
1348/* [R 1] Parity register #0 read */
1349#define MISC_REG_MISC_PRTY_STS 0xa38c
1350#define MISC_REG_NIG_WOL_P0 0xa270
1351#define MISC_REG_NIG_WOL_P1 0xa274
1352/* [R 1] If set; indicates that the pcie_rst_b was asserted without perst
1353 assertion */
1354#define MISC_REG_PCIE_HOT_RESET 0xa618
1355/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x071d2911.
1356 inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1
1357 divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1
1358 divider[3] (reset value 0); [4] P2 divider[0] (reset value 1); [5] P2
1359 divider[1] (reset value 0); [6] P2 divider[2] (reset value 0); [7] P2
1360 divider[3] (reset value 0); [8] ph_det_dis (reset value 1); [9]
1361 freq_det_dis (reset value 0); [10] Icpx[0] (reset value 0); [11] Icpx[1]
1362 (reset value 1); [12] Icpx[2] (reset value 0); [13] Icpx[3] (reset value
1363 1); [14] Icpx[4] (reset value 0); [15] Icpx[5] (reset value 0); [16]
1364 Rx[0] (reset value 1); [17] Rx[1] (reset value 0); [18] vc_en (reset
1365 value 1); [19] vco_rng[0] (reset value 1); [20] vco_rng[1] (reset value
1366 1); [21] Kvco_xf[0] (reset value 0); [22] Kvco_xf[1] (reset value 0);
1367 [23] Kvco_xf[2] (reset value 0); [24] Kvco_xs[0] (reset value 1); [25]
1368 Kvco_xs[1] (reset value 1); [26] Kvco_xs[2] (reset value 1); [27]
1369 testd_en (reset value 0); [28] testd_sel[0] (reset value 0); [29]
1370 testd_sel[1] (reset value 0); [30] testd_sel[2] (reset value 0); [31]
1371 testa_en (reset value 0); */
1372#define MISC_REG_PLL_STORM_CTRL_1 0xa294
1373#define MISC_REG_PLL_STORM_CTRL_2 0xa298
1374#define MISC_REG_PLL_STORM_CTRL_3 0xa29c
1375#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0
1376/* [RW 32] reset reg#2; write/read one = the specific block is out of reset;
1377 write/read zero = the specific block is in reset; addr 0-wr- the write
1378 value will be written to the register; addr 1-set - one will be written
1379 to all the bits that have the value of one in the data written (bits that
1380 have the value of zero will not be changed); addr 2-clear - zero will be
1381 written to all the bits that have the value of one in the data written
1382 (bits that have the value of zero will not be changed); addr 3-ignore;
1383 read ignore from all addr except addr 00; inside order of the bits is:
1384 [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3] rst_emac1; [4] rst_grc;
1385 [5] rst_mcp_n_reset_reg_hard_core; [6] rst_mcp_n_hard_core_rst_b; [7]
1386 rst_mcp_n_reset_cmn_cpu; [8] rst_mcp_n_reset_cmn_core; [9] rst_rbcn;
1387 [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13]
1388 Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; [16]
1389 rst_pxp_rq_rd_wr; [31:17] reserved */
1390#define MISC_REG_RESET_REG_2 0xa590
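/*
 * Usage sketch (editor's illustration) of the addr0/addr1/addr2 scheme
 * described above: the register has write-one-to-set and write-one-to-clear
 * aliases at +4 and +8, so a single block (e.g. EMAC0, bit [2]) can be
 * pulled out of or put into reset without touching the other bits. REG_WR()
 * is assumed from bnx2x.h.
 */
#if 0	/* illustration only */
static inline void bnx2x_emac0_reset_ctl(struct bnx2x *bp, bool out_of_reset)
{
	u32 bit = 1 << 2;	/* [2] rst_emac0 */

	if (out_of_reset)
		REG_WR(bp, MISC_REG_RESET_REG_2 + 4, bit); /* addr 1: set */
	else
		REG_WR(bp, MISC_REG_RESET_REG_2 + 8, bit); /* addr 2: clear */
}
#endif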
1391/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
1392 shared with the driver resides */
1393#define MISC_REG_SHARED_MEM_ADDR 0xa2b4
1394/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1';
1395 the corresponding SPIO bit will turn off its drivers and become an
1396 input. This is the reset state of all SPIO pins. The read value of these
1397 bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
1398 bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits
1399 is written as a '1'; the corresponding SPIO bit will drive low. The read
1400 value of these bits will be a '1' if that last command (#SET; #CLR; or
1401#FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of
1402 these bits is written as a '1'; the corresponding SPIO bit will drive
1403 high (if it has that capability). The read value of these bits will be a
1404 '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET.
1405 (reset value 0). [7-0] VALUE RO; These bits indicate the read value of
1406 each of the eight SPIO pins. This is the result value of the pin; not the
1407 drive value. Writing these bits will have no effect. Each 8-bit field
1408 is divided as follows: [0] VAUX Enable; when pulsed low; enables supply
1409 from VAUX. (This is an output pin only; the FLOAT field is not applicable
1410 for this pin); [1] VAUX Disable; when pulsed low; disables supply from
1411 VAUX. (This is an output pin only; FLOAT field is not applicable for this
1412 pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to
1413 select VAUX supply. (This is an output pin only; it is not controlled by
1414 the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT
1415 field is not applicable for this pin; only the VALUE fields is relevant -
1416 it reflects the output value); [3] port swap; [4] spio_4; [5] spio_5; [6]
1417 Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP
1418 device ID select; read by UMP firmware. */
1419#define MISC_REG_SPIO 0xa4fc
1420/* [RW 8] These bits enable the SPIO_INTs to signal events to the IGU/MCP.
1421 according to the following map: [3:0] reserved; [4] spio_4; [5] spio_5;
1422 [7:6] reserved */
1423#define MISC_REG_SPIO_EVENT_EN 0xa2b8
1424/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bits clears the
1425 corresponding bit in the #OLD_VALUE register. This will acknowledge an
1426 interrupt on the falling edge of corresponding SPIO input (reset value
1427 0). [23-16] OLD_SET Writing a '1' to these bits sets the corresponding bit
1428 in the #OLD_VALUE register. This will acknowledge an interrupt on the
1429 rising edge of corresponding SPIO input (reset value 0). [15-8] OLD_VALUE
1430 RO; These bits indicate the old value of the SPIO input value. When the
1431 ~INT_STATE bit is set; this bit indicates the OLD value of the pin such
1432 that if ~INT_STATE is set and this bit is '0'; then the interrupt is due
1433 to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the
1434 interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE
1435 RO; These bits indicate the current SPIO interrupt state for each SPIO
1436 pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR
1437 command bit is written. This bit is set when the SPIO input does not
1438 match the current value in #OLD_VALUE (reset value 0). */
1439#define MISC_REG_SPIO_INT 0xa500
1440/* [RW 32] reload value for counter 4 if reload; the value will be reloaded if
1441 the counter reached zero and the reload bit
1442 (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
1443#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
1444/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
1445 in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
1446 timer 8 */
1447#define MISC_REG_SW_TIMER_VAL 0xa5c0
1448/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
1449 loaded; 0-prepare; 1-unprepare */
1450#define MISC_REG_UNPREPARED 0xa424
1451#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0)
1452#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1)
1453#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
1454#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
1455#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
1456#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
1457#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
1458#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
1459#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS (0xf<<18)
1460/* [RW 1] Input enable for RX_BMAC0 IF */
1461#define NIG_REG_BMAC0_IN_EN 0x100ac
1462/* [RW 1] output enable for TX_BMAC0 IF */
1463#define NIG_REG_BMAC0_OUT_EN 0x100e0
1464/* [RW 1] output enable for TX BMAC pause port 0 IF */
1465#define NIG_REG_BMAC0_PAUSE_OUT_EN 0x10110
1466/* [RW 1] output enable for RX_BMAC0_REGS IF */
1467#define NIG_REG_BMAC0_REGS_OUT_EN 0x100e8
1468/* [RW 1] output enable for RX BRB1 port0 IF */
1469#define NIG_REG_BRB0_OUT_EN 0x100f8
1470/* [RW 1] Input enable for TX BRB1 pause port 0 IF */
1471#define NIG_REG_BRB0_PAUSE_IN_EN 0x100c4
1472/* [RW 1] output enable for RX BRB1 port1 IF */
1473#define NIG_REG_BRB1_OUT_EN 0x100fc
1474/* [RW 1] Input enable for TX BRB1 pause port 1 IF */
1475#define NIG_REG_BRB1_PAUSE_IN_EN 0x100c8
1476/* [RW 1] output enable for RX BRB1 LP IF */
1477#define NIG_REG_BRB_LB_OUT_EN 0x10100
1478/* [WB_W 82] Debug packet to LP from RBC; Data spelling: [63:0] data; [64]
1479 error; [67:65] eop_bvalid; [68] eop; [69] sop; [70] port_id; [71] flush;
1480 [73:72] vnic_num; [81:74] sideband_info */
1481#define NIG_REG_DEBUG_PACKET_LB 0x10800
1482/* [RW 1] Input enable for TX Debug packet */
1483#define NIG_REG_EGRESS_DEBUG_IN_EN 0x100dc
1484/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all
1485 packets from PBF are not forwarded to the MAC and just deleted from FIFO.
1486 The first packet may be deleted from the middle; the last packet will
1487 always be deleted till the end. */
1488#define NIG_REG_EGRESS_DRAIN0_MODE 0x10060
1489/* [RW 1] Output enable to EMAC0 */
1490#define NIG_REG_EGRESS_EMAC0_OUT_EN 0x10120
1491/* [RW 1] MAC configuration for packets of port0. If 1 - all packets output
1492 to emac for port0; otherwise to bmac for port0 */
1493#define NIG_REG_EGRESS_EMAC0_PORT 0x10058
1494/* [RW 1] Input enable for TX PBF user packet port0 IF */
1495#define NIG_REG_EGRESS_PBF0_IN_EN 0x100cc
1496/* [RW 1] Input enable for TX PBF user packet port1 IF */
1497#define NIG_REG_EGRESS_PBF1_IN_EN 0x100d0
1498/* [RW 1] Input enable for TX UMP management packet port0 IF */
1499#define NIG_REG_EGRESS_UMP0_IN_EN 0x100d4
1500/* [RW 1] Input enable for RX_EMAC0 IF */
1501#define NIG_REG_EMAC0_IN_EN 0x100a4
1502/* [RW 1] output enable for TX EMAC pause port 0 IF */
1503#define NIG_REG_EMAC0_PAUSE_OUT_EN 0x10118
1504/* [R 1] status from emac0. This bit is set when MDINT from either the
1505 EXT_MDINT pin or from the Copper PHY is driven low. This condition must
1506 be cleared in the attached PHY device that is driving the MDINT pin. */
1507#define NIG_REG_EMAC0_STATUS_MISC_MI_INT 0x10494
1508/* [WB 48] This address space contains BMAC0 registers. The BMAC registers
1509 are described in appendix A. In order to access the BMAC0 registers; the
1510 base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be
1511 added to each BMAC register offset */
1512#define NIG_REG_INGRESS_BMAC0_MEM 0x10c00
1513/* [WB 48] This address space contains BMAC1 registers. The BMAC registers
1514 are described in appendix A. In order to access the BMAC1 registers; the
1515 base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be
1516 added to each BMAC register offset */
1517#define NIG_REG_INGRESS_BMAC1_MEM 0x11000
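/*
 * Usage sketch (editor's illustration): per the two comments above, a BMAC
 * register is reached by adding its offset to the per-port window base.
 * Only the address arithmetic is shown; the [WB ...] annotation marks a
 * wide-bus region, which is typically accessed through the driver's DMAE
 * helpers rather than plain readl/writel.
 */
#if 0	/* illustration only */
static inline u32 bnx2x_bmac_addr(int port, u32 bmac_reg_offset)
{
	u32 base = port ? NIG_REG_INGRESS_BMAC1_MEM :
			  NIG_REG_INGRESS_BMAC0_MEM;

	return base + bmac_reg_offset;
}
#endif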
1518/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */
1519#define NIG_REG_INGRESS_EOP_LB_EMPTY 0x104e0
1520/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data
1521 packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] */
1522#define NIG_REG_INGRESS_EOP_LB_FIFO 0x104e4
1523/* [RW 27] 0 - must be active for Everest A0; 1- for Everest B0 when latch
1524 logic for interrupts must be used. Enable per bit of interrupt of
1525 ~latch_status.latch_status */
1526#define NIG_REG_LATCH_BC_0 0x16210
1527/* [RW 27] Latch for each interrupt from Unicore.b[0]
1528 status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete;
1529 b[2]status_emac0_misc_cfg_change; b[3]status_emac0_misc_link_status;
1530 b[4]status_emac0_misc_link_change; b[5]status_emac0_misc_attn;
1531 b[6]status_serdes0_mac_crs; b[7]status_serdes0_autoneg_complete;
1532 b[8]status_serdes0_fiber_rxact; b[9]status_serdes0_link_status;
1533 b[10]status_serdes0_mr_page_rx; b[11]status_serdes0_cl73_an_complete;
1534 b[12]status_serdes0_cl73_mr_page_rx; b[13]status_serdes0_rx_sigdet;
1535 b[14]status_xgxs0_remotemdioreq; b[15]status_xgxs0_link10g;
1536 b[16]status_xgxs0_autoneg_complete; b[17]status_xgxs0_fiber_rxact;
1537 b[21:18]status_xgxs0_link_status; b[22]status_xgxs0_mr_page_rx;
1538 b[23]status_xgxs0_cl73_an_complete; b[24]status_xgxs0_cl73_mr_page_rx;
1539 b[25]status_xgxs0_rx_sigdet; b[26]status_xgxs0_mac_crs */
1540#define NIG_REG_LATCH_STATUS_0 0x18000
1541/* [RW 1] led 10g for port 0 */
1542#define NIG_REG_LED_10G_P0 0x10320
1543/* [RW 1] led 10g for port 1 */
1544#define NIG_REG_LED_10G_P1 0x10324
1545/* [RW 1] Port0: This bit is set to enable the use of the
1546 ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field
1547 defined below. If this bit is cleared; then the blink rate will be about
1548 8Hz. */
1549#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 0x10318
1550/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for
1551 Traffic LED in milliseconds. Must be a non-zero value. This 12-bit field
1552 is reset to 0x080; giving a default blink rate of approximately 8Hz. */
1553#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310
1554/* [RW 1] Port0: If set along with the
1555 ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0
1556 bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED
1557 bit; the Traffic LED will blink with the blink rate specified in
1558 ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
1559 ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
1560 fields. */
1561#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 0x10308
1562/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The
1563 Traffic LED will then be controlled via bit ~nig_registers_
1564 led_control_traffic_p0.led_control_traffic_p0 and bit
1565 ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 */
1566#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 0x102f8
1567/* [RW 1] Port0: If set along with the led_control_override_traffic_p0 bit;
1568 turns on the Traffic LED. If the led_control_blink_traffic_p0 bit is also
1569 set; the LED will blink with blink rate specified in
1570 ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
1571 ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
1572 fields. */
1573#define NIG_REG_LED_CONTROL_TRAFFIC_P0 0x10300
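/*
 * Usage sketch (editor's illustration) of how the port-0 Traffic LED control
 * bits above interact: override hardware control, turn the LED on, request
 * blinking, and program/enable an explicit blink rate. REG_WR() is assumed
 * from bnx2x.h; the rate value passed in is an example only.
 */
#if 0	/* illustration only */
static inline void bnx2x_led_blink_traffic_p0(struct bnx2x *bp, u32 rate_ms)
{
	REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 1);
	REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0, 1);
	REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0, 1);
	REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0, rate_ms);
	REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0, 1);
}
#endif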
1574/* [RW 4] led mode for port0: 0 MAC; 1-3 PHY1; 4 MAC2; 5-7 PHY4; 8-MAC3;
1575 9-11 PHY7; 12 MAC4; 13-15 PHY10; */
1576#define NIG_REG_LED_MODE_P0 0x102f0
1577/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1-
1578 tsdm enable; b2- usdm enable */
1579#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070
1580#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 0x16074
1581/* [RW 1] SAFC enable for port0. This register may get 1 only when
1582 ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same
1583 port */
1584#define NIG_REG_LLFC_ENABLE_0 0x16208
1585/* [RW 16] classes are high-priority for port0 */
1586#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058
1587/* [RW 16] classes are low-priority for port0 */
1588#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060
1589/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
1590#define NIG_REG_LLFC_OUT_EN_0 0x160c8
1591#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
1592#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
1593#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
1594#define NIG_REG_LLH0_BRB1_DRV_MASK_MF 0x16048
1595/* [RW 1] send to BRB1 if no match on any of RMP rules. */
1596#define NIG_REG_LLH0_BRB1_NOT_MCP 0x1025c
1597/* [RW 2] Determine the classification participants. 0: no classification. 1:
1598 classification upon VLAN id. 2: classification upon MAC address. 3:
1599 classification upon both VLAN id & MAC addr. */
1600#define NIG_REG_LLH0_CLS_TYPE 0x16080
1601/* [RW 32] cm header for llh0 */
1602#define NIG_REG_LLH0_CM_HEADER 0x1007c
1603#define NIG_REG_LLH0_DEST_IP_0_1 0x101dc
1604#define NIG_REG_LLH0_DEST_MAC_0_0 0x101c0
1605/* [RW 16] destination TCP address 1. The LLH will look for this address in
1606 all incoming packets. */
1607#define NIG_REG_LLH0_DEST_TCP_0 0x10220
1608/* [RW 16] destination UDP address 1. The LLH will look for this address in
1609 all incoming packets. */
1610#define NIG_REG_LLH0_DEST_UDP_0 0x10214
1611#define NIG_REG_LLH0_ERROR_MASK 0x1008c
1612/* [RW 8] event id for llh0 */
1613#define NIG_REG_LLH0_EVENT_ID 0x10084
1614#define NIG_REG_LLH0_FUNC_EN 0x160fc
1615#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
1616/* [RW 1] Determine the IP version to look for in
1617 ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
1618#define NIG_REG_LLH0_IPV4_IPV6_0 0x10208
1619/* [RW 1] t bit for llh0 */
1620#define NIG_REG_LLH0_T_BIT 0x10074
1621/* [RW 12] VLAN ID 1. In case of VLAN packet the LLH will look for this ID. */
1622#define NIG_REG_LLH0_VLAN_ID_0 0x1022c
1623/* [RW 8] init credit counter for port0 in LLH */
1624#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554
1625#define NIG_REG_LLH0_XCM_MASK 0x10130
1626#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248
1627/* [RW 1] send to BRB1 if no match on any of RMP rules. */
1628#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc
1629/* [RW 2] Determine the classification participants. 0: no classification. 1:
1630 classification upon VLAN id. 2: classification upon MAC address. 3:
1631 classification upon both VLAN id & MAC addr. */
1632#define NIG_REG_LLH1_CLS_TYPE 0x16084
1633/* [RW 32] cm header for llh1 */
1634#define NIG_REG_LLH1_CM_HEADER 0x10080
1635#define NIG_REG_LLH1_ERROR_MASK 0x10090
1636/* [RW 8] event id for llh1 */
1637#define NIG_REG_LLH1_EVENT_ID 0x10088
1638/* [RW 8] init credit counter for port1 in LLH */
1639#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
1640#define NIG_REG_LLH1_XCM_MASK 0x10134
1641/* [RW 1] When this bit is set; the LLH will expect all packets to be with
1642 e1hov */
1643#define NIG_REG_LLH_E1HOV_MODE 0x160d8
1644/* [RW 1] When this bit is set; the LLH will classify the packet before
1645 sending it to the BRB or calculating WoL on it. */
1646#define NIG_REG_LLH_MF_MODE 0x16024
1647#define NIG_REG_MASK_INTERRUPT_PORT0 0x10330
1648#define NIG_REG_MASK_INTERRUPT_PORT1 0x10334
1649/* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */
1650#define NIG_REG_NIG_EMAC0_EN 0x1003c
1651/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1 block. */
1652#define NIG_REG_NIG_EMAC1_EN 0x10040
1653/* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the
1654 EMAC0 to strip the CRC from the ingress packets. */
1655#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044
1656/* [R 32] Interrupt register #0 read */
1657#define NIG_REG_NIG_INT_STS_0 0x103b0
1658#define NIG_REG_NIG_INT_STS_1 0x103c0
1659/* [R 32] Parity register #0 read */
1660#define NIG_REG_NIG_PRTY_STS 0x103d0
1661/* [RW 1] Pause enable for port0. This register may get 1 only when
1662 ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
1663 port */
1664#define NIG_REG_PAUSE_ENABLE_0 0x160c0
1665/* [RW 1] Input enable for RX PBF LP IF */
1666#define NIG_REG_PBF_LB_IN_EN 0x100b4
1667/* [RW 1] Value of this register will be transmitted to port swap when
1668 ~nig_registers_strap_override.strap_override =1 */
1669#define NIG_REG_PORT_SWAP 0x10394
1670/* [RW 1] output enable for RX parser descriptor IF */
1671#define NIG_REG_PRS_EOP_OUT_EN 0x10104
1672/* [RW 1] Input enable for RX parser request IF */
1673#define NIG_REG_PRS_REQ_IN_EN 0x100b8
1674/* [RW 5] control to serdes - CL45 DEVAD */
1675#define NIG_REG_SERDES0_CTRL_MD_DEVAD 0x10370
1676/* [RW 1] control to serdes; 0 - clause 45; 1 - clause 22 */
1677#define NIG_REG_SERDES0_CTRL_MD_ST 0x1036c
1678/* [RW 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */
1679#define NIG_REG_SERDES0_CTRL_PHY_ADDR 0x10374
1680/* [R 1] status from serdes0 that inputs to interrupt logic of link status */
1681#define NIG_REG_SERDES0_STATUS_LINK_STATUS 0x10578
1682/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
1683 for port0 */
1684#define NIG_REG_STAT0_BRB_DISCARD 0x105f0
1685/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
1686 for port0 */
1687#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8
1688/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
1689 are between 1024 and 1522 bytes for port0 */
1690#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750
1691/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
1692 are 1523 bytes and above for port0 */
1693#define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760
1694/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
1695 for port1 */
1696#define NIG_REG_STAT1_BRB_DISCARD 0x10628
1697/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that
1698 are between 1024 and 1522 bytes for port1 */
1699#define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0
1700/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that
1701 are 1523 bytes and above for port1 */
1702#define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0
1703/* [WB_R 64] Rx statistics : User octets received for LP */
1704#define NIG_REG_STAT2_BRB_OCTET 0x107e0
1705#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328
1706#define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c
1707/* [RW 1] port swap mux selection. If this register is equal to 0 then port
1708 swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then
1709 port swap is equal to ~nig_registers_port_swap.port_swap */
1710#define NIG_REG_STRAP_OVERRIDE 0x10398
1711/* [RW 1] output enable for RX_XCM0 IF */
1712#define NIG_REG_XCM0_OUT_EN 0x100f0
1713/* [RW 1] output enable for RX_XCM1 IF */
1714#define NIG_REG_XCM1_OUT_EN 0x100f4
1715/* [RW 1] control to xgxs - remote PHY in-band MDIO */
1716#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST 0x10348
1717/* [RW 5] control to xgxs - CL45 DEVAD */
1718#define NIG_REG_XGXS0_CTRL_MD_DEVAD 0x1033c
1719/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */
1720#define NIG_REG_XGXS0_CTRL_MD_ST 0x10338
1721/* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */
1722#define NIG_REG_XGXS0_CTRL_PHY_ADDR 0x10340
1723/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. */
1724#define NIG_REG_XGXS0_STATUS_LINK10G 0x10680
1725/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */
1726#define NIG_REG_XGXS0_STATUS_LINK_STATUS 0x10684
1727/* [RW 2] selection for XGXS lane of port 0 in NIG_MUX block */
1728#define NIG_REG_XGXS_LANE_SEL_P0 0x102e8
1729/* [RW 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */
1730#define NIG_REG_XGXS_SERDES0_MODE_SEL 0x102e0
1731#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT (0x1<<0)
1732#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1<<9)
1733#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15)
1734#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18)
1735#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
1736/* [RW 1] Disable processing further tasks from port 0 (after ending the
1737 current task in process). */
1738#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
1739/* [RW 1] Disable processing further tasks from port 1 (after ending the
1740 current task in process). */
1741#define PBF_REG_DISABLE_NEW_TASK_PROC_P1 0x140060
1742/* [RW 1] Disable processing further tasks from port 4 (after ending the
1743 current task in process). */
1744#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
1745#define PBF_REG_IF_ENABLE_REG 0x140044
1746/* [RW 1] Init bit. When set the initial credits are copied to the credit
1747 registers (except the port credits). Should be set and then reset after
1748 the configuration of the block has ended. */
1749#define PBF_REG_INIT 0x140000
1750/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
1751 copied to the credit register. Should be set and then reset after the
1752 configuration of the port has ended. */
1753#define PBF_REG_INIT_P0 0x140004
1754/* [RW 1] Init bit for port 1. When set the initial credit of port 1 is
1755 copied to the credit register. Should be set and then reset after the
1756 configuration of the port has ended. */
1757#define PBF_REG_INIT_P1 0x140008
1758/* [RW 1] Init bit for port 4. When set the initial credit of port 4 is
1759 copied to the credit register. Should be set and then reset after the
1760 configuration of the port has ended. */
1761#define PBF_REG_INIT_P4 0x14000c
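/*
 * Usage sketch (editor's illustration) of the init-bit handshake described
 * above: the bit is pulsed (set, then cleared) once the block/port
 * configuration is done, so that the initial credits are copied to the
 * credit registers. REG_WR() is assumed from bnx2x.h.
 */
#if 0	/* illustration only */
static inline void bnx2x_pbf_init_port0(struct bnx2x *bp)
{
	REG_WR(bp, PBF_REG_INIT_P0, 1);
	REG_WR(bp, PBF_REG_INIT_P0, 0);
}
#endif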
1762/* [RW 1] Enable for mac interface 0. */
1763#define PBF_REG_MAC_IF0_ENABLE 0x140030
1764/* [RW 1] Enable for mac interface 1. */
1765#define PBF_REG_MAC_IF1_ENABLE 0x140034
1766/* [RW 1] Enable for the loopback interface. */
1767#define PBF_REG_MAC_LB_ENABLE 0x140040
1768/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
1769 not supported. */
1770#define PBF_REG_P0_ARB_THRSH 0x1400e4
1771/* [R 11] Current credit for port 0 in the tx port buffers in 16 byte lines. */
1772#define PBF_REG_P0_CREDIT 0x140200
1773/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
1774 lines. */
1775#define PBF_REG_P0_INIT_CRD 0x1400d0
1776/* [RW 1] Indication that pause is enabled for port 0. */
1777#define PBF_REG_P0_PAUSE_ENABLE 0x140014
1778/* [R 8] Number of tasks in port 0 task queue. */
1779#define PBF_REG_P0_TASK_CNT 0x140204
1780/* [R 11] Current credit for port 1 in the tx port buffers in 16 byte lines. */
1781#define PBF_REG_P1_CREDIT 0x140208
1782/* [RW 11] Initial credit for port 1 in the tx port buffers in 16 byte
1783 lines. */
1784#define PBF_REG_P1_INIT_CRD 0x1400d4
1785/* [R 8] Number of tasks in port 1 task queue. */
1786#define PBF_REG_P1_TASK_CNT 0x14020c
1787/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */
1788#define PBF_REG_P4_CREDIT 0x140210
1789/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
1790 lines. */
1791#define PBF_REG_P4_INIT_CRD 0x1400e0
1792/* [R 8] Number of tasks in port 4 task queue. */
1793#define PBF_REG_P4_TASK_CNT 0x140214
1794/* [RW 5] Interrupt mask register #0 read/write */
1795#define PBF_REG_PBF_INT_MASK 0x1401d4
1796/* [R 5] Interrupt register #0 read */
1797#define PBF_REG_PBF_INT_STS 0x1401c8
1798#define PB_REG_CONTROL 0
1799/* [RW 2] Interrupt mask register #0 read/write */
1800#define PB_REG_PB_INT_MASK 0x28
1801/* [R 2] Interrupt register #0 read */
1802#define PB_REG_PB_INT_STS 0x1c
1803/* [RW 4] Parity mask register #0 read/write */
1804#define PB_REG_PB_PRTY_MASK 0x38
1805/* [R 4] Parity register #0 read */
1806#define PB_REG_PB_PRTY_STS 0x2c
1807#define PRS_REG_A_PRSU_20 0x40134
1808/* [R 8] debug only: CFC load request current credit. Transaction based. */
1809#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164
1810/* [R 8] debug only: CFC search request current credit. Transaction based. */
1811#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT 0x40168
1812/* [RW 6] The initial credit for the search message to the CFC interface.
1813 Credit is transaction based. */
1814#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c
1815/* [RW 24] CID for port 0 if no match */
1816#define PRS_REG_CID_PORT_0 0x400fc
1817/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
1818 load response is reset and packet type is 0. Used in packet start message
1819 to TCM. */
1820#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0 0x400dc
1821#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1 0x400e0
1822#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2 0x400e4
1823#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3 0x400e8
1824#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4 0x400ec
1825#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5 0x400f0
1826/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
1827 load response is set and packet type is 0. Used in packet start message
1828 to TCM. */
1829#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0 0x400bc
1830#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1 0x400c0
1831#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2 0x400c4
1832#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3 0x400c8
1833#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4 0x400cc
1834#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5 0x400d0
1835/* [RW 32] The CM header for a match and packet type 1 for loopback port.
1836 Used in packet start message to TCM. */
1837#define PRS_REG_CM_HDR_LOOPBACK_TYPE_1 0x4009c
1838#define PRS_REG_CM_HDR_LOOPBACK_TYPE_2 0x400a0
1839#define PRS_REG_CM_HDR_LOOPBACK_TYPE_3 0x400a4
1840#define PRS_REG_CM_HDR_LOOPBACK_TYPE_4 0x400a8
1841/* [RW 32] The CM header for a match and packet type 0. Used in packet start
1842 message to TCM. */
1843#define PRS_REG_CM_HDR_TYPE_0 0x40078
1844#define PRS_REG_CM_HDR_TYPE_1 0x4007c
1845#define PRS_REG_CM_HDR_TYPE_2 0x40080
1846#define PRS_REG_CM_HDR_TYPE_3 0x40084
1847#define PRS_REG_CM_HDR_TYPE_4 0x40088
1848/* [RW 32] The CM header in case there was not a match on the connection */
1849#define PRS_REG_CM_NO_MATCH_HDR 0x400b8
1850/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */
1851#define PRS_REG_E1HOV_MODE 0x401c8
1852/* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet
1853 start message to TCM. */
1854#define PRS_REG_EVENT_ID_1 0x40054
1855#define PRS_REG_EVENT_ID_2 0x40058
1856#define PRS_REG_EVENT_ID_3 0x4005c
1857/* [RW 16] The Ethernet type value for FCoE */
1858#define PRS_REG_FCOE_TYPE 0x401d0
1859/* [RW 8] Context region for flush packet with packet type 0. Used in CFC
1860 load request message. */
1861#define PRS_REG_FLUSH_REGIONS_TYPE_0 0x40004
1862#define PRS_REG_FLUSH_REGIONS_TYPE_1 0x40008
1863#define PRS_REG_FLUSH_REGIONS_TYPE_2 0x4000c
1864#define PRS_REG_FLUSH_REGIONS_TYPE_3 0x40010
1865#define PRS_REG_FLUSH_REGIONS_TYPE_4 0x40014
1866#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018
1867#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c
1868#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020
1869/* [RW 4] The increment value to send in the CFC load request message */
1870#define PRS_REG_INC_VALUE 0x40048
1871/* [RW 1] If set indicates not to send messages to CFC on received packets */
1872#define PRS_REG_NIC_MODE 0x40138
1873/* [RW 8] The 8-bit event ID for cases where there is no match on the
1874 connection. Used in packet start message to TCM. */
1875#define PRS_REG_NO_MATCH_EVENT_ID 0x40070
1876/* [ST 24] The number of input CFC flush packets */
1877#define PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES 0x40128
1878/* [ST 32] The number of cycles the Parser halted its operation since it
1879 could not allocate the next serial number */
1880#define PRS_REG_NUM_OF_DEAD_CYCLES 0x40130
1881/* [ST 24] The number of input packets */
1882#define PRS_REG_NUM_OF_PACKETS 0x40124
1883/* [ST 24] The number of input transparent flush packets */
1884#define PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES 0x4012c
1885/* [RW 8] Context region for received Ethernet packet with a match and
1886 packet type 0. Used in CFC load request message */
1887#define PRS_REG_PACKET_REGIONS_TYPE_0 0x40028
1888#define PRS_REG_PACKET_REGIONS_TYPE_1 0x4002c
1889#define PRS_REG_PACKET_REGIONS_TYPE_2 0x40030
1890#define PRS_REG_PACKET_REGIONS_TYPE_3 0x40034
1891#define PRS_REG_PACKET_REGIONS_TYPE_4 0x40038
1892#define PRS_REG_PACKET_REGIONS_TYPE_5 0x4003c
1893#define PRS_REG_PACKET_REGIONS_TYPE_6 0x40040
1894#define PRS_REG_PACKET_REGIONS_TYPE_7 0x40044
1895/* [R 2] debug only: Number of pending requests for CAC on port 0. */
1896#define PRS_REG_PENDING_BRB_CAC0_RQ 0x40174
1897/* [R 2] debug only: Number of pending requests for header parsing. */
1898#define PRS_REG_PENDING_BRB_PRS_RQ 0x40170
1899/* [R 1] Interrupt register #0 read */
1900#define PRS_REG_PRS_INT_STS 0x40188
1901/* [RW 8] Parity mask register #0 read/write */
1902#define PRS_REG_PRS_PRTY_MASK 0x401a4
1903/* [R 8] Parity register #0 read */
1904#define PRS_REG_PRS_PRTY_STS 0x40198
1905/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
1906 request message */
1907#define PRS_REG_PURE_REGIONS 0x40024
1908/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this
1909   serial number was released by SDM but cannot be used because a previous
1910 serial number was not released. */
1911#define PRS_REG_SERIAL_NUM_STATUS_LSB 0x40154
1912/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this
1913   serial number was released by SDM but cannot be used because a previous
1914 serial number was not released. */
1915#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158
1916/* [R 4] debug only: SRC current credit. Transaction based. */
1917#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c
1918/* [R 8] debug only: TCM current credit. Cycle based. */
1919#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
1920/* [R 8] debug only: TSDM current credit. Transaction based. */
1921#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
1922/* [R 6] Debug only: Number of used entries in the data FIFO */
1923#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
1924/* [R 7] Debug only: Number of used entries in the header FIFO */
1925#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
1926#define PXP2_REG_PGL_ADDR_88_F0 0x120534
1927#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
1928#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
1929#define PXP2_REG_PGL_ADDR_94_F0 0x120540
1930#define PXP2_REG_PGL_CONTROL0 0x120490
1931#define PXP2_REG_PGL_CONTROL1 0x120514
1932#define PXP2_REG_PGL_DEBUG 0x120520
1933/* [RW 32] third dword data of expansion rom request. this register is
1934   special. reading from it provides a vector of outstanding read requests. if
1935 a bit is zero it means that a read request on the corresponding tag did
1936 not finish yet (not all completions have arrived for it) */
1937#define PXP2_REG_PGL_EXP_ROM2 0x120808
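/* Illustrative usage sketch (not part of the register map): since a zero bit
 * in PXP2_REG_PGL_EXP_ROM2 marks a read tag whose completions have not all
 * arrived, software could poll until every bit is set before proceeding
 * (assuming the driver's REG_RD() accessor and msleep(); the count is an
 * example only):
 *
 *	int cnt = 1000;
 *	while (cnt-- && REG_RD(bp, PXP2_REG_PGL_EXP_ROM2) != 0xffffffff)
 *		msleep(1);
 */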
1938/* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask;
1939   bits[15:0]-address */
1940#define PXP2_REG_PGL_INT_CSDM_0 0x1204f4
1941#define PXP2_REG_PGL_INT_CSDM_1 0x1204f8
1942#define PXP2_REG_PGL_INT_CSDM_2 0x1204fc
1943#define PXP2_REG_PGL_INT_CSDM_3 0x120500
1944#define PXP2_REG_PGL_INT_CSDM_4 0x120504
1945#define PXP2_REG_PGL_INT_CSDM_5 0x120508
1946#define PXP2_REG_PGL_INT_CSDM_6 0x12050c
1947#define PXP2_REG_PGL_INT_CSDM_7 0x120510
1948/* [RW 32] Inbound interrupt table for TSDM: bits[31:16]-mask;
1949   bits[15:0]-address */
1950#define PXP2_REG_PGL_INT_TSDM_0 0x120494
1951#define PXP2_REG_PGL_INT_TSDM_1 0x120498
1952#define PXP2_REG_PGL_INT_TSDM_2 0x12049c
1953#define PXP2_REG_PGL_INT_TSDM_3 0x1204a0
1954#define PXP2_REG_PGL_INT_TSDM_4 0x1204a4
1955#define PXP2_REG_PGL_INT_TSDM_5 0x1204a8
1956#define PXP2_REG_PGL_INT_TSDM_6 0x1204ac
1957#define PXP2_REG_PGL_INT_TSDM_7 0x1204b0
1958/* [RW 32] Inbound interrupt table for USDM: bits[31:16]-mask;
1959   bits[15:0]-address */
1960#define PXP2_REG_PGL_INT_USDM_0 0x1204b4
1961#define PXP2_REG_PGL_INT_USDM_1 0x1204b8
1962#define PXP2_REG_PGL_INT_USDM_2 0x1204bc
1963#define PXP2_REG_PGL_INT_USDM_3 0x1204c0
1964#define PXP2_REG_PGL_INT_USDM_4 0x1204c4
1965#define PXP2_REG_PGL_INT_USDM_5 0x1204c8
1966#define PXP2_REG_PGL_INT_USDM_6 0x1204cc
1967#define PXP2_REG_PGL_INT_USDM_7 0x1204d0
1968/* [RW 32] Inbound interrupt table for XSDM: bits[31:16]-mask;
1969   bits[15:0]-address */
1970#define PXP2_REG_PGL_INT_XSDM_0 0x1204d4
1971#define PXP2_REG_PGL_INT_XSDM_1 0x1204d8
1972#define PXP2_REG_PGL_INT_XSDM_2 0x1204dc
1973#define PXP2_REG_PGL_INT_XSDM_3 0x1204e0
1974#define PXP2_REG_PGL_INT_XSDM_4 0x1204e4
1975#define PXP2_REG_PGL_INT_XSDM_5 0x1204e8
1976#define PXP2_REG_PGL_INT_XSDM_6 0x1204ec
1977#define PXP2_REG_PGL_INT_XSDM_7 0x1204f0
1978/* [RW 3] this field allows one function to pretend being another function
1979 when accessing any BAR mapped resource within the device. the value of
1980 the field is the number of the function that will be accessed
1981   effectively. after software writes to this field it must read it back in
1982   order to know that the new value has taken effect */
1983#define PXP2_REG_PGL_PRETEND_FUNC_F0 0x120674
1984#define PXP2_REG_PGL_PRETEND_FUNC_F1 0x120678
1985#define PXP2_REG_PGL_PRETEND_FUNC_F2 0x12067c
1986#define PXP2_REG_PGL_PRETEND_FUNC_F3 0x120680
1987#define PXP2_REG_PGL_PRETEND_FUNC_F4 0x120684
1988#define PXP2_REG_PGL_PRETEND_FUNC_F5 0x120688
1989#define PXP2_REG_PGL_PRETEND_FUNC_F6 0x12068c
1990#define PXP2_REG_PGL_PRETEND_FUNC_F7 0x120690
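/* Illustrative usage sketch (not part of the register map): per the note
 * above, a write to a pretend-function register should be followed by a read
 * of the same register before relying on the new value. Assuming the driver's
 * REG_WR()/REG_RD() accessors and a hypothetical 'pretend_func' value:
 *
 *	REG_WR(bp, PXP2_REG_PGL_PRETEND_FUNC_F0, pretend_func);
 *	REG_RD(bp, PXP2_REG_PGL_PRETEND_FUNC_F0);	read back to latch
 */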
1991/* [R 1] this bit indicates that a read request was blocked because
1992   bus_master_en was deasserted */
1993#define PXP2_REG_PGL_READ_BLOCKED 0x120568
1994#define PXP2_REG_PGL_TAGS_LIMIT 0x1205a8
1995/* [R 18] debug only */
1996#define PXP2_REG_PGL_TXW_CDTS 0x12052c
1997/* [R 1] this bit indicates that a write request was blocked because
1998   bus_master_en was deasserted */
1999#define PXP2_REG_PGL_WRITE_BLOCKED 0x120564
2000#define PXP2_REG_PSWRQ_BW_ADD1 0x1201c0
2001#define PXP2_REG_PSWRQ_BW_ADD10 0x1201e4
2002#define PXP2_REG_PSWRQ_BW_ADD11 0x1201e8
2003#define PXP2_REG_PSWRQ_BW_ADD2 0x1201c4
2004#define PXP2_REG_PSWRQ_BW_ADD28 0x120228
2005#define PXP2_REG_PSWRQ_BW_ADD3 0x1201c8
2006#define PXP2_REG_PSWRQ_BW_ADD6 0x1201d4
2007#define PXP2_REG_PSWRQ_BW_ADD7 0x1201d8
2008#define PXP2_REG_PSWRQ_BW_ADD8 0x1201dc
2009#define PXP2_REG_PSWRQ_BW_ADD9 0x1201e0
2010#define PXP2_REG_PSWRQ_BW_CREDIT 0x12032c
2011#define PXP2_REG_PSWRQ_BW_L1 0x1202b0
2012#define PXP2_REG_PSWRQ_BW_L10 0x1202d4
2013#define PXP2_REG_PSWRQ_BW_L11 0x1202d8
2014#define PXP2_REG_PSWRQ_BW_L2 0x1202b4
2015#define PXP2_REG_PSWRQ_BW_L28 0x120318
2016#define PXP2_REG_PSWRQ_BW_L3 0x1202b8
2017#define PXP2_REG_PSWRQ_BW_L6 0x1202c4
2018#define PXP2_REG_PSWRQ_BW_L7 0x1202c8
2019#define PXP2_REG_PSWRQ_BW_L8 0x1202cc
2020#define PXP2_REG_PSWRQ_BW_L9 0x1202d0
2021#define PXP2_REG_PSWRQ_BW_RD 0x120324
2022#define PXP2_REG_PSWRQ_BW_UB1 0x120238
2023#define PXP2_REG_PSWRQ_BW_UB10 0x12025c
2024#define PXP2_REG_PSWRQ_BW_UB11 0x120260
2025#define PXP2_REG_PSWRQ_BW_UB2 0x12023c
2026#define PXP2_REG_PSWRQ_BW_UB28 0x1202a0
2027#define PXP2_REG_PSWRQ_BW_UB3 0x120240
2028#define PXP2_REG_PSWRQ_BW_UB6 0x12024c
2029#define PXP2_REG_PSWRQ_BW_UB7 0x120250
2030#define PXP2_REG_PSWRQ_BW_UB8 0x120254
2031#define PXP2_REG_PSWRQ_BW_UB9 0x120258
2032#define PXP2_REG_PSWRQ_BW_WR 0x120328
2033#define PXP2_REG_PSWRQ_CDU0_L2P 0x120000
2034#define PXP2_REG_PSWRQ_QM0_L2P 0x120038
2035#define PXP2_REG_PSWRQ_SRC0_L2P 0x120054
2036#define PXP2_REG_PSWRQ_TM0_L2P 0x12001c
2037#define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0
2038/* [RW 32] Interrupt mask register #0 read/write */
2039#define PXP2_REG_PXP2_INT_MASK_0 0x120578
2040/* [R 32] Interrupt register #0 read */
2041#define PXP2_REG_PXP2_INT_STS_0 0x12056c
2042#define PXP2_REG_PXP2_INT_STS_1 0x120608
2043/* [RC 32] Interrupt register #0 read clear */
2044#define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570
2045/* [RW 32] Parity mask register #0 read/write */
2046#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588
2047#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598
2048/* [R 32] Parity register #0 read */
2049#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
2050#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
2051/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
2052 indication about backpressure) */
2053#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
2054/* [R 8] Debug only: The blocks counter - number of unused block ids */
2055#define PXP2_REG_RD_BLK_CNT 0x120418
2056/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer.
2057 Must be bigger than 6. Normally should not be changed. */
2058#define PXP2_REG_RD_BLK_NUM_CFG 0x12040c
2059/* [RW 2] CDU byte swapping mode configuration for master read requests */
2060#define PXP2_REG_RD_CDURD_SWAP_MODE 0x120404
2061/* [RW 1] When '1'; inputs to the PSWRD block are ignored */
2062#define PXP2_REG_RD_DISABLE_INPUTS 0x120374
2063/* [R 1] PSWRD internal memories initialization is done */
2064#define PXP2_REG_RD_INIT_DONE 0x120370
2065/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2066 allocated for vq10 */
2067#define PXP2_REG_RD_MAX_BLKS_VQ10 0x1203a0
2068/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2069 allocated for vq11 */
2070#define PXP2_REG_RD_MAX_BLKS_VQ11 0x1203a4
2071/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2072 allocated for vq17 */
2073#define PXP2_REG_RD_MAX_BLKS_VQ17 0x1203bc
2074/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2075 allocated for vq18 */
2076#define PXP2_REG_RD_MAX_BLKS_VQ18 0x1203c0
2077/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2078 allocated for vq19 */
2079#define PXP2_REG_RD_MAX_BLKS_VQ19 0x1203c4
2080/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2081 allocated for vq22 */
2082#define PXP2_REG_RD_MAX_BLKS_VQ22 0x1203d0
2083/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2084 allocated for vq25 */
2085#define PXP2_REG_RD_MAX_BLKS_VQ25 0x1203dc
2086/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2087 allocated for vq6 */
2088#define PXP2_REG_RD_MAX_BLKS_VQ6 0x120390
2089/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2090 allocated for vq9 */
2091#define PXP2_REG_RD_MAX_BLKS_VQ9 0x12039c
2092/* [RW 2] PBF byte swapping mode configuration for master read requests */
2093#define PXP2_REG_RD_PBF_SWAP_MODE 0x1203f4
2094/* [R 1] Debug only: Indication if delivery ports are idle */
2095#define PXP2_REG_RD_PORT_IS_IDLE_0 0x12041c
2096#define PXP2_REG_RD_PORT_IS_IDLE_1 0x120420
2097/* [RW 2] QM byte swapping mode configuration for master read requests */
2098#define PXP2_REG_RD_QM_SWAP_MODE 0x1203f8
2099/* [R 7] Debug only: The SR counter - number of unused sub request ids */
2100#define PXP2_REG_RD_SR_CNT 0x120414
2101/* [RW 2] SRC byte swapping mode configuration for master read requests */
2102#define PXP2_REG_RD_SRC_SWAP_MODE 0x120400
2103/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must
2104 be bigger than 1. Normally should not be changed. */
2105#define PXP2_REG_RD_SR_NUM_CFG 0x120408
2106/* [RW 1] Signals the PSWRD block to start initializing internal memories */
2107#define PXP2_REG_RD_START_INIT 0x12036c
2108/* [RW 2] TM byte swapping mode configuration for master read requests */
2109#define PXP2_REG_RD_TM_SWAP_MODE 0x1203fc
2110/* [RW 10] Bandwidth addition to VQ0 read requests */
2111#define PXP2_REG_RQ_BW_RD_ADD0 0x1201bc
2112/* [RW 10] Bandwidth addition to VQ12 read requests */
2113#define PXP2_REG_RQ_BW_RD_ADD12 0x1201ec
2114/* [RW 10] Bandwidth addition to VQ13 read requests */
2115#define PXP2_REG_RQ_BW_RD_ADD13 0x1201f0
2116/* [RW 10] Bandwidth addition to VQ14 read requests */
2117#define PXP2_REG_RQ_BW_RD_ADD14 0x1201f4
2118/* [RW 10] Bandwidth addition to VQ15 read requests */
2119#define PXP2_REG_RQ_BW_RD_ADD15 0x1201f8
2120/* [RW 10] Bandwidth addition to VQ16 read requests */
2121#define PXP2_REG_RQ_BW_RD_ADD16 0x1201fc
2122/* [RW 10] Bandwidth addition to VQ17 read requests */
2123#define PXP2_REG_RQ_BW_RD_ADD17 0x120200
2124/* [RW 10] Bandwidth addition to VQ18 read requests */
2125#define PXP2_REG_RQ_BW_RD_ADD18 0x120204
2126/* [RW 10] Bandwidth addition to VQ19 read requests */
2127#define PXP2_REG_RQ_BW_RD_ADD19 0x120208
2128/* [RW 10] Bandwidth addition to VQ20 read requests */
2129#define PXP2_REG_RQ_BW_RD_ADD20 0x12020c
2130/* [RW 10] Bandwidth addition to VQ22 read requests */
2131#define PXP2_REG_RQ_BW_RD_ADD22 0x120210
2132/* [RW 10] Bandwidth addition to VQ23 read requests */
2133#define PXP2_REG_RQ_BW_RD_ADD23 0x120214
2134/* [RW 10] Bandwidth addition to VQ24 read requests */
2135#define PXP2_REG_RQ_BW_RD_ADD24 0x120218
2136/* [RW 10] Bandwidth addition to VQ25 read requests */
2137#define PXP2_REG_RQ_BW_RD_ADD25 0x12021c
2138/* [RW 10] Bandwidth addition to VQ26 read requests */
2139#define PXP2_REG_RQ_BW_RD_ADD26 0x120220
2140/* [RW 10] Bandwidth addition to VQ27 read requests */
2141#define PXP2_REG_RQ_BW_RD_ADD27 0x120224
2142/* [RW 10] Bandwidth addition to VQ4 read requests */
2143#define PXP2_REG_RQ_BW_RD_ADD4 0x1201cc
2144/* [RW 10] Bandwidth addition to VQ5 read requests */
2145#define PXP2_REG_RQ_BW_RD_ADD5 0x1201d0
2146/* [RW 10] Bandwidth Typical L for VQ0 Read requests */
2147#define PXP2_REG_RQ_BW_RD_L0 0x1202ac
2148/* [RW 10] Bandwidth Typical L for VQ12 Read requests */
2149#define PXP2_REG_RQ_BW_RD_L12 0x1202dc
2150/* [RW 10] Bandwidth Typical L for VQ13 Read requests */
2151#define PXP2_REG_RQ_BW_RD_L13 0x1202e0
2152/* [RW 10] Bandwidth Typical L for VQ14 Read requests */
2153#define PXP2_REG_RQ_BW_RD_L14 0x1202e4
2154/* [RW 10] Bandwidth Typical L for VQ15 Read requests */
2155#define PXP2_REG_RQ_BW_RD_L15 0x1202e8
2156/* [RW 10] Bandwidth Typical L for VQ16 Read requests */
2157#define PXP2_REG_RQ_BW_RD_L16 0x1202ec
2158/* [RW 10] Bandwidth Typical L for VQ17 Read requests */
2159#define PXP2_REG_RQ_BW_RD_L17 0x1202f0
2160/* [RW 10] Bandwidth Typical L for VQ18 Read requests */
2161#define PXP2_REG_RQ_BW_RD_L18 0x1202f4
2162/* [RW 10] Bandwidth Typical L for VQ19 Read requests */
2163#define PXP2_REG_RQ_BW_RD_L19 0x1202f8
2164/* [RW 10] Bandwidth Typical L for VQ20 Read requests */
2165#define PXP2_REG_RQ_BW_RD_L20 0x1202fc
2166/* [RW 10] Bandwidth Typical L for VQ22 Read requests */
2167#define PXP2_REG_RQ_BW_RD_L22 0x120300
2168/* [RW 10] Bandwidth Typical L for VQ23 Read requests */
2169#define PXP2_REG_RQ_BW_RD_L23 0x120304
2170/* [RW 10] Bandwidth Typical L for VQ24 Read requests */
2171#define PXP2_REG_RQ_BW_RD_L24 0x120308
2172/* [RW 10] Bandwidth Typical L for VQ25 Read requests */
2173#define PXP2_REG_RQ_BW_RD_L25 0x12030c
2174/* [RW 10] Bandwidth Typical L for VQ26 Read requests */
2175#define PXP2_REG_RQ_BW_RD_L26 0x120310
2176/* [RW 10] Bandwidth Typical L for VQ27 Read requests */
2177#define PXP2_REG_RQ_BW_RD_L27 0x120314
2178/* [RW 10] Bandwidth Typical L for VQ4 Read requests */
2179#define PXP2_REG_RQ_BW_RD_L4 0x1202bc
2180/* [RW 10] Bandwidth Typical L for VQ5 Read requests - currently not used */
2181#define PXP2_REG_RQ_BW_RD_L5 0x1202c0
2182/* [RW 7] Bandwidth upper bound for VQ0 read requests */
2183#define PXP2_REG_RQ_BW_RD_UBOUND0 0x120234
2184/* [RW 7] Bandwidth upper bound for VQ12 read requests */
2185#define PXP2_REG_RQ_BW_RD_UBOUND12 0x120264
2186/* [RW 7] Bandwidth upper bound for VQ13 read requests */
2187#define PXP2_REG_RQ_BW_RD_UBOUND13 0x120268
2188/* [RW 7] Bandwidth upper bound for VQ14 read requests */
2189#define PXP2_REG_RQ_BW_RD_UBOUND14 0x12026c
2190/* [RW 7] Bandwidth upper bound for VQ15 read requests */
2191#define PXP2_REG_RQ_BW_RD_UBOUND15 0x120270
2192/* [RW 7] Bandwidth upper bound for VQ16 read requests */
2193#define PXP2_REG_RQ_BW_RD_UBOUND16 0x120274
2194/* [RW 7] Bandwidth upper bound for VQ17 read requests */
2195#define PXP2_REG_RQ_BW_RD_UBOUND17 0x120278
2196/* [RW 7] Bandwidth upper bound for VQ18 read requests */
2197#define PXP2_REG_RQ_BW_RD_UBOUND18 0x12027c
2198/* [RW 7] Bandwidth upper bound for VQ19 read requests */
2199#define PXP2_REG_RQ_BW_RD_UBOUND19 0x120280
2200/* [RW 7] Bandwidth upper bound for VQ20 read requests */
2201#define PXP2_REG_RQ_BW_RD_UBOUND20 0x120284
2202/* [RW 7] Bandwidth upper bound for VQ22 read requests */
2203#define PXP2_REG_RQ_BW_RD_UBOUND22 0x120288
2204/* [RW 7] Bandwidth upper bound for VQ23 read requests */
2205#define PXP2_REG_RQ_BW_RD_UBOUND23 0x12028c
2206/* [RW 7] Bandwidth upper bound for VQ24 read requests */
2207#define PXP2_REG_RQ_BW_RD_UBOUND24 0x120290
2208/* [RW 7] Bandwidth upper bound for VQ25 read requests */
2209#define PXP2_REG_RQ_BW_RD_UBOUND25 0x120294
2210/* [RW 7] Bandwidth upper bound for VQ26 read requests */
2211#define PXP2_REG_RQ_BW_RD_UBOUND26 0x120298
2212/* [RW 7] Bandwidth upper bound for VQ27 read requests */
2213#define PXP2_REG_RQ_BW_RD_UBOUND27 0x12029c
2214/* [RW 7] Bandwidth upper bound for VQ4 read requests */
2215#define PXP2_REG_RQ_BW_RD_UBOUND4 0x120244
2216/* [RW 7] Bandwidth upper bound for VQ5 read requests */
2217#define PXP2_REG_RQ_BW_RD_UBOUND5 0x120248
2218/* [RW 10] Bandwidth addition to VQ29 write requests */
2219#define PXP2_REG_RQ_BW_WR_ADD29 0x12022c
2220/* [RW 10] Bandwidth addition to VQ30 write requests */
2221#define PXP2_REG_RQ_BW_WR_ADD30 0x120230
2222/* [RW 10] Bandwidth Typical L for VQ29 Write requests */
2223#define PXP2_REG_RQ_BW_WR_L29 0x12031c
2224/* [RW 10] Bandwidth Typical L for VQ30 Write requests */
2225#define PXP2_REG_RQ_BW_WR_L30 0x120320
2226/* [RW 7] Bandwidth upper bound for VQ29 */
2227#define PXP2_REG_RQ_BW_WR_UBOUND29 0x1202a4
2228/* [RW 7] Bandwidth upper bound for VQ30 */
2229#define PXP2_REG_RQ_BW_WR_UBOUND30 0x1202a8
2230/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */
2231#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR 0x120008
2232/* [RW 2] Endian mode for cdu */
2233#define PXP2_REG_RQ_CDU_ENDIAN_M 0x1201a0
2234#define PXP2_REG_RQ_CDU_FIRST_ILT 0x12061c
2235#define PXP2_REG_RQ_CDU_LAST_ILT 0x120620
2236/* [RW 3] page size in L2P table for CDU module; 0-4k; 1-8k; 2-16k; 3-32k;
2237   4-64k; 5-128k */
2238#define PXP2_REG_RQ_CDU_P_SIZE 0x120018
2239/* [R 1] '1' indicates that the requester has finished its internal
2240 configuration */
2241#define PXP2_REG_RQ_CFG_DONE 0x1201b4
2242/* [RW 2] Endian mode for debug */
2243#define PXP2_REG_RQ_DBG_ENDIAN_M 0x1201a4
2244/* [RW 1] When '1'; requests will enter input buffers but won't get out
2245 towards the glue */
2246#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330
2247/* [RW 1] 1 - SR will be aligned by 64B; 0 - SR will be aligned by 8B */
2248#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0
2249/* [RW 1] If 1 an ILT failure will not result in an ELT access; an interrupt
2250   will be asserted */
2251#define PXP2_REG_RQ_ELT_DISABLE 0x12066c
2252/* [RW 2] Endian mode for hc */
2253#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8
2254/* [RW 1] when '0' ILT logic will work as in A0; otherwise B0; for backward
2255   compatibility needs; note that different registers are used per mode */
2256#define PXP2_REG_RQ_ILT_MODE 0x1205b4
2257/* [WB 53] Onchip address table */
2258#define PXP2_REG_RQ_ONCHIP_AT 0x122000
2259/* [WB 53] Onchip address table - B0 */
2260#define PXP2_REG_RQ_ONCHIP_AT_B0 0x128000
2261/* [RW 13] Pending read limiter threshold; in Dwords */
2262#define PXP2_REG_RQ_PDR_LIMIT 0x12033c
2263/* [RW 2] Endian mode for qm */
2264#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194
2265#define PXP2_REG_RQ_QM_FIRST_ILT 0x120634
2266#define PXP2_REG_RQ_QM_LAST_ILT 0x120638
2267/* [RW 3] page size in L2P table for QM module; 0-4k; 1-8k; 2-16k; 3-32k;
2268   4-64k; 5-128k */
2269#define PXP2_REG_RQ_QM_P_SIZE 0x120050
2270/* [RW 1] '1' indicates that the RBC has finished configuring the PSWRQ */
2271#define PXP2_REG_RQ_RBC_DONE 0x1201b0
2272/* [RW 3] Max burst size field for read requests port 0; 000 - 128B;
2273   001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
2274#define PXP2_REG_RQ_RD_MBS0 0x120160
2275/* [RW 3] Max burst size field for read requests port 1; 000 - 128B;
2276   001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
2277#define PXP2_REG_RQ_RD_MBS1 0x120168
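/* Illustrative usage sketch (not part of the register map): the 3-bit MBS
 * fields above encode the burst size as log2(size / 128B), so a value could
 * be derived from a byte count as below (an assumption for illustration, not
 * the driver's own helper; ilog2() is from <linux/log2.h>):
 *
 *	u32 mbs = ilog2(burst_bytes / 128);	128B -> 0, 256B -> 1, ...
 *	REG_WR(bp, PXP2_REG_RQ_RD_MBS0, mbs);
 *	REG_WR(bp, PXP2_REG_RQ_RD_MBS1, mbs);
 */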
2278/* [RW 2] Endian mode for src */
2279#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c
2280#define PXP2_REG_RQ_SRC_FIRST_ILT 0x12063c
2281#define PXP2_REG_RQ_SRC_LAST_ILT 0x120640
2282/* [RW 3] page size in L2P table for SRC module; 0-4k; 1-8k; 2-16k; 3-32k;
2283   4-64k; 5-128k */
2284#define PXP2_REG_RQ_SRC_P_SIZE 0x12006c
2285/* [RW 2] Endian mode for tm */
2286#define PXP2_REG_RQ_TM_ENDIAN_M 0x120198
2287#define PXP2_REG_RQ_TM_FIRST_ILT 0x120644
2288#define PXP2_REG_RQ_TM_LAST_ILT 0x120648
2289/* [RW 3] page size in L2P table for TM module; 0-4k; 1-8k; 2-16k; 3-32k;
2290   4-64k; 5-128k */
2291#define PXP2_REG_RQ_TM_P_SIZE 0x120034
2292/* [R 5] Number of entries in the ufifo; this fifo holds l2p completions */
2293#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY 0x12080c
2294/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */
2295#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR 0x120094
2296/* [R 8] Number of entries occupied by vq 0 in pswrq memory */
2297#define PXP2_REG_RQ_VQ0_ENTRY_CNT 0x120810
2298/* [R 8] Number of entries occupied by vq 10 in pswrq memory */
2299#define PXP2_REG_RQ_VQ10_ENTRY_CNT 0x120818
2300/* [R 8] Number of entries occupied by vq 11 in pswrq memory */
2301#define PXP2_REG_RQ_VQ11_ENTRY_CNT 0x120820
2302/* [R 8] Number of entries occupied by vq 12 in pswrq memory */
2303#define PXP2_REG_RQ_VQ12_ENTRY_CNT 0x120828
2304/* [R 8] Number of entries occupied by vq 13 in pswrq memory */
2305#define PXP2_REG_RQ_VQ13_ENTRY_CNT 0x120830
2306/* [R 8] Number of entries occupied by vq 14 in pswrq memory */
2307#define PXP2_REG_RQ_VQ14_ENTRY_CNT 0x120838
2308/* [R 8] Number of entries occupied by vq 15 in pswrq memory */
2309#define PXP2_REG_RQ_VQ15_ENTRY_CNT 0x120840
2310/* [R 8] Number of entries occupied by vq 16 in pswrq memory */
2311#define PXP2_REG_RQ_VQ16_ENTRY_CNT 0x120848
2312/* [R 8] Number of entries occupied by vq 17 in pswrq memory */
2313#define PXP2_REG_RQ_VQ17_ENTRY_CNT 0x120850
2314/* [R 8] Number of entries occupied by vq 18 in pswrq memory */
2315#define PXP2_REG_RQ_VQ18_ENTRY_CNT 0x120858
2316/* [R 8] Number of entries occupied by vq 19 in pswrq memory */
2317#define PXP2_REG_RQ_VQ19_ENTRY_CNT 0x120860
2318/* [R 8] Number of entries occupied by vq 1 in pswrq memory */
2319#define PXP2_REG_RQ_VQ1_ENTRY_CNT 0x120868
2320/* [R 8] Number of entries occupied by vq 20 in pswrq memory */
2321#define PXP2_REG_RQ_VQ20_ENTRY_CNT 0x120870
2322/* [R 8] Number of entries occupied by vq 21 in pswrq memory */
2323#define PXP2_REG_RQ_VQ21_ENTRY_CNT 0x120878
2324/* [R 8] Number of entries occupied by vq 22 in pswrq memory */
2325#define PXP2_REG_RQ_VQ22_ENTRY_CNT 0x120880
2326/* [R 8] Number of entries occupied by vq 23 in pswrq memory */
2327#define PXP2_REG_RQ_VQ23_ENTRY_CNT 0x120888
2328/* [R 8] Number of entries occupied by vq 24 in pswrq memory */
2329#define PXP2_REG_RQ_VQ24_ENTRY_CNT 0x120890
2330/* [R 8] Number of entries occupied by vq 25 in pswrq memory */
2331#define PXP2_REG_RQ_VQ25_ENTRY_CNT 0x120898
2332/* [R 8] Number of entries occupied by vq 26 in pswrq memory */
2333#define PXP2_REG_RQ_VQ26_ENTRY_CNT 0x1208a0
2334/* [R 8] Number of entries occupied by vq 27 in pswrq memory */
2335#define PXP2_REG_RQ_VQ27_ENTRY_CNT 0x1208a8
2336/* [R 8] Number of entries occupied by vq 28 in pswrq memory */
2337#define PXP2_REG_RQ_VQ28_ENTRY_CNT 0x1208b0
2338/* [R 8] Number of entries occupied by vq 29 in pswrq memory */
2339#define PXP2_REG_RQ_VQ29_ENTRY_CNT 0x1208b8
2340/* [R 8] Number of entries occupied by vq 2 in pswrq memory */
2341#define PXP2_REG_RQ_VQ2_ENTRY_CNT 0x1208c0
2342/* [R 8] Number of entries occupied by vq 30 in pswrq memory */
2343#define PXP2_REG_RQ_VQ30_ENTRY_CNT 0x1208c8
2344/* [R 8] Number of entries occupied by vq 31 in pswrq memory */
2345#define PXP2_REG_RQ_VQ31_ENTRY_CNT 0x1208d0
2346/* [R 8] Number of entries occupied by vq 3 in pswrq memory */
2347#define PXP2_REG_RQ_VQ3_ENTRY_CNT 0x1208d8
2348/* [R 8] Number of entries occupied by vq 4 in pswrq memory */
2349#define PXP2_REG_RQ_VQ4_ENTRY_CNT 0x1208e0
2350/* [R 8] Number of entries occupied by vq 5 in pswrq memory */
2351#define PXP2_REG_RQ_VQ5_ENTRY_CNT 0x1208e8
2352/* [R 8] Number of entries occupied by vq 6 in pswrq memory */
2353#define PXP2_REG_RQ_VQ6_ENTRY_CNT 0x1208f0
2354/* [R 8] Number of entries occupied by vq 7 in pswrq memory */
2355#define PXP2_REG_RQ_VQ7_ENTRY_CNT 0x1208f8
2356/* [R 8] Number of entries occupied by vq 8 in pswrq memory */
2357#define PXP2_REG_RQ_VQ8_ENTRY_CNT 0x120900
2358/* [R 8] Number of entries occupied by vq 9 in pswrq memory */
2359#define PXP2_REG_RQ_VQ9_ENTRY_CNT 0x120908
2360/* [RW 3] Max burst size field for write requests port 0; 000 - 128B;
2361   001 - 256B; 010 - 512B; */
2362#define PXP2_REG_RQ_WR_MBS0 0x12015c
2363/* [RW 3] Max burst size field for write requests port 1; 000 - 128B;
2364   001 - 256B; 010 - 512B; */
2365#define PXP2_REG_RQ_WR_MBS1 0x120164
2366/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2367 buffer reaches this number has_payload will be asserted */
2368#define PXP2_REG_WR_CDU_MPS 0x1205f0
2369/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2370 buffer reaches this number has_payload will be asserted */
2371#define PXP2_REG_WR_CSDM_MPS 0x1205d0
2372/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2373 buffer reaches this number has_payload will be asserted */
2374#define PXP2_REG_WR_DBG_MPS 0x1205e8
2375/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2376 buffer reaches this number has_payload will be asserted */
2377#define PXP2_REG_WR_DMAE_MPS 0x1205ec
2378/* [RW 10] if the number of entries in the dmae fifo is higher than this
2379   threshold then the has_payload indication will be asserted; the default
2380   value should be greater than the write MBS size! */
2381#define PXP2_REG_WR_DMAE_TH 0x120368
2382/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2383 buffer reaches this number has_payload will be asserted */
2384#define PXP2_REG_WR_HC_MPS 0x1205c8
2385/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2386 buffer reaches this number has_payload will be asserted */
2387#define PXP2_REG_WR_QM_MPS 0x1205dc
2388/* [RW 1] 0 - working in A0 mode; 1 - working in B0 mode */
2389#define PXP2_REG_WR_REV_MODE 0x120670
2390/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2391 buffer reaches this number has_payload will be asserted */
2392#define PXP2_REG_WR_SRC_MPS 0x1205e4
2393/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2394 buffer reaches this number has_payload will be asserted */
2395#define PXP2_REG_WR_TM_MPS 0x1205e0
2396/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2397 buffer reaches this number has_payload will be asserted */
2398#define PXP2_REG_WR_TSDM_MPS 0x1205d4
2399/* [RW 10] if the number of entries in the usdmdp fifo is higher than this
2400   threshold then the has_payload indication will be asserted; the default
2401   value should be greater than the write MBS size! */
2402#define PXP2_REG_WR_USDMDP_TH 0x120348
2403/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2404 buffer reaches this number has_payload will be asserted */
2405#define PXP2_REG_WR_USDM_MPS 0x1205cc
2406/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2407 buffer reaches this number has_payload will be asserted */
2408#define PXP2_REG_WR_XSDM_MPS 0x1205d8
2409/* [R 1] debug only: Indication if PSWHST arbiter is idle */
2410#define PXP_REG_HST_ARB_IS_IDLE 0x103004
2411/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
2412 this client is waiting for the arbiter. */
2413#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
2414/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue
2415   block. Should be used to close the gates. */
2416#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
2417/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
2418   should update according to 'hst_discard_doorbells' register when the state
2419 machine is idle */
2420#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
2421/* [RW 1] When 1; new internal writes arriving to the block are discarded.
2422   Should be used to close the gates. */
2423#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
2424/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
2425 means this PSWHST is discarding inputs from this client. Each bit should
2426   update according to 'hst_discard_internal_writes' register when the state
2427 machine is idle. */
2428#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
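/* Illustrative usage sketch (not part of the register map): "closing the
 * gates" as described above amounts to setting both discard controls, and
 * re-opening them later by clearing the same bits. A minimal example,
 * assuming the driver's REG_WR() accessor:
 *
 *	REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, 1);
 *	REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, 1);
 *	...
 *	REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, 0);
 *	REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, 0);
 */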
2429/* [WB 160] Used for initialization of the inbound interrupts memory */
2430#define PXP_REG_HST_INBOUND_INT 0x103800
2431/* [RW 32] Interrupt mask register #0 read/write */
2432#define PXP_REG_PXP_INT_MASK_0 0x103074
2433#define PXP_REG_PXP_INT_MASK_1 0x103084
2434/* [R 32] Interrupt register #0 read */
2435#define PXP_REG_PXP_INT_STS_0 0x103068
2436#define PXP_REG_PXP_INT_STS_1 0x103078
2437/* [RC 32] Interrupt register #0 read clear */
2438#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c
2439/* [RW 26] Parity mask register #0 read/write */
2440#define PXP_REG_PXP_PRTY_MASK 0x103094
2441/* [R 26] Parity register #0 read */
2442#define PXP_REG_PXP_PRTY_STS 0x103088
2443/* [RW 4] The activity counter initial increment value sent in the load
2444 request */
2445#define QM_REG_ACTCTRINITVAL_0 0x168040
2446#define QM_REG_ACTCTRINITVAL_1 0x168044
2447#define QM_REG_ACTCTRINITVAL_2 0x168048
2448#define QM_REG_ACTCTRINITVAL_3 0x16804c
2449/* [RW 32] The base logical address (in bytes) of each physical queue. The
2450   index I represents the physical queue number. The 12 lsbs are ignored and
2451 considered zero so practically there are only 20 bits in this register;
2452 queues 63-0 */
2453#define QM_REG_BASEADDR 0x168900
2454/* [RW 32] The base logical address (in bytes) of each physical queue. The
2455   index I represents the physical queue number. The 12 lsbs are ignored and
2456 considered zero so practically there are only 20 bits in this register;
2457 queues 127-64 */
2458#define QM_REG_BASEADDR_EXT_A 0x16e100
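/* Illustrative usage sketch (not part of the register map): because the 12
 * lsbs of each base-address entry are ignored, the logical base written for
 * a physical queue should be 4KB aligned. A hedged example, assuming the
 * driver's REG_WR() accessor and a hypothetical per-queue size 'q_size' that
 * is a multiple of 4096:
 *
 *	u32 base = q * q_size;			4KB-aligned byte address
 *	REG_WR(bp, QM_REG_BASEADDR + q * 4, base);
 */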
2459/* [RW 16] The byte credit cost for each task. This value is for both ports */
2460#define QM_REG_BYTECRDCOST 0x168234
2461/* [RW 16] The initial byte credit value for both ports. */
2462#define QM_REG_BYTECRDINITVAL 0x168238
2463/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
2464 queue uses port 0 else it uses port 1; queues 31-0 */
2465#define QM_REG_BYTECRDPORT_LSB 0x168228
2466/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
2467 queue uses port 0 else it uses port 1; queues 95-64 */
2468#define QM_REG_BYTECRDPORT_LSB_EXT_A 0x16e520
2469/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
2470 queue uses port 0 else it uses port 1; queues 63-32 */
2471#define QM_REG_BYTECRDPORT_MSB 0x168224
2472/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
2473 queue uses port 0 else it uses port 1; queues 127-96 */
2474#define QM_REG_BYTECRDPORT_MSB_EXT_A 0x16e51c
2475/* [RW 16] The byte credit value above which the QM is considered almost
2476   full */
2477#define QM_REG_BYTECREDITAFULLTHR 0x168094
2478/* [RW 4] The initial credit for interface */
2479#define QM_REG_CMINITCRD_0 0x1680cc
2480#define QM_REG_CMINITCRD_1 0x1680d0
2481#define QM_REG_CMINITCRD_2 0x1680d4
2482#define QM_REG_CMINITCRD_3 0x1680d8
2483#define QM_REG_CMINITCRD_4 0x1680dc
2484#define QM_REG_CMINITCRD_5 0x1680e0
2485#define QM_REG_CMINITCRD_6 0x1680e4
2486#define QM_REG_CMINITCRD_7 0x1680e8
2487/* [RW 8] A mask bit per CM interface. If this bit is 0 then this interface
2488 is masked */
2489#define QM_REG_CMINTEN 0x1680ec
2490/* [RW 12] A bit vector which indicates which one of the queues are tied to
2491 interface 0 */
2492#define QM_REG_CMINTVOQMASK_0 0x1681f4
2493#define QM_REG_CMINTVOQMASK_1 0x1681f8
2494#define QM_REG_CMINTVOQMASK_2 0x1681fc
2495#define QM_REG_CMINTVOQMASK_3 0x168200
2496#define QM_REG_CMINTVOQMASK_4 0x168204
2497#define QM_REG_CMINTVOQMASK_5 0x168208
2498#define QM_REG_CMINTVOQMASK_6 0x16820c
2499#define QM_REG_CMINTVOQMASK_7 0x168210
2500/* [RW 20] The number of connections divided by 16 which dictates the size
2501   of each queue which belongs to an even function number. */
2502#define QM_REG_CONNNUM_0 0x168020
2503/* [R 6] Keep the fill level of the fifo from write client 4 */
2504#define QM_REG_CQM_WRC_FIFOLVL 0x168018
2505/* [RW 8] The context regions sent in the CFC load request */
2506#define QM_REG_CTXREG_0 0x168030
2507#define QM_REG_CTXREG_1 0x168034
2508#define QM_REG_CTXREG_2 0x168038
2509#define QM_REG_CTXREG_3 0x16803c
2510/* [RW 12] The VOQ mask used to select the VOQs which needs to be full for
2511 bypass enable */
2512#define QM_REG_ENBYPVOQMASK 0x16823c
2513/* [RW 32] A bit mask per each physical queue. If a bit is set then the
2514 physical queue uses the byte credit; queues 31-0 */
2515#define QM_REG_ENBYTECRD_LSB 0x168220
2516/* [RW 32] A bit mask per each physical queue. If a bit is set then the
2517 physical queue uses the byte credit; queues 95-64 */
2518#define QM_REG_ENBYTECRD_LSB_EXT_A 0x16e518
2519/* [RW 32] A bit mask per each physical queue. If a bit is set then the
2520 physical queue uses the byte credit; queues 63-32 */
2521#define QM_REG_ENBYTECRD_MSB 0x16821c
2522/* [RW 32] A bit mask per each physical queue. If a bit is set then the
2523 physical queue uses the byte credit; queues 127-96 */
2524#define QM_REG_ENBYTECRD_MSB_EXT_A 0x16e514
2525/* [RW 4] If cleared then the secondary interface will not be served by the
2526 RR arbiter */
2527#define QM_REG_ENSEC 0x1680f0
2528/* [RW 32] NA */
2529#define QM_REG_FUNCNUMSEL_LSB 0x168230
2530/* [RW 32] NA */
2531#define QM_REG_FUNCNUMSEL_MSB 0x16822c
2532/* [RW 32] A mask register to mask the Almost empty signals which will not
2533   be used for the almost empty indication to the HW block; queues 31:0 */
2534#define QM_REG_HWAEMPTYMASK_LSB 0x168218
2535/* [RW 32] A mask register to mask the Almost empty signals which will not
2536   be used for the almost empty indication to the HW block; queues 95-64 */
2537#define QM_REG_HWAEMPTYMASK_LSB_EXT_A 0x16e510
2538/* [RW 32] A mask register to mask the Almost empty signals which will not
2539   be used for the almost empty indication to the HW block; queues 63:32 */
2540#define QM_REG_HWAEMPTYMASK_MSB 0x168214
2541/* [RW 32] A mask register to mask the Almost empty signals which will not
2542   be used for the almost empty indication to the HW block; queues 127-96 */
2543#define QM_REG_HWAEMPTYMASK_MSB_EXT_A 0x16e50c
2544/* [RW 4] The number of outstanding request to CFC */
2545#define QM_REG_OUTLDREQ 0x168804
2546/* [RC 1] A flag to indicate that overflow error occurred in one of the
2547 queues. */
2548#define QM_REG_OVFERROR 0x16805c
2549/* [RC 7] the Q where the overflow occurs */
2550#define QM_REG_OVFQNUM 0x168058
2551/* [R 16] Pause state for physical queues 15-0 */
2552#define QM_REG_PAUSESTATE0 0x168410
2553/* [R 16] Pause state for physical queues 31-16 */
2554#define QM_REG_PAUSESTATE1 0x168414
2555/* [R 16] Pause state for physical queues 47-32 */
2556#define QM_REG_PAUSESTATE2 0x16e684
2557/* [R 16] Pause state for physical queues 63-48 */
2558#define QM_REG_PAUSESTATE3 0x16e688
2559/* [R 16] Pause state for physical queues 79-64 */
2560#define QM_REG_PAUSESTATE4 0x16e68c
2561/* [R 16] Pause state for physical queues 95-80 */
2562#define QM_REG_PAUSESTATE5 0x16e690
2563/* [R 16] Pause state for physical queues 111-96 */
2564#define QM_REG_PAUSESTATE6 0x16e694
2565/* [R 16] Pause state for physical queues 127-112 */
2566#define QM_REG_PAUSESTATE7 0x16e698
2567/* [RW 2] The PCI attributes field used in the PCI request. */
2568#define QM_REG_PCIREQAT 0x168054
2569/* [R 16] The byte credit of port 0 */
2570#define QM_REG_PORT0BYTECRD 0x168300
2571/* [R 16] The byte credit of port 1 */
2572#define QM_REG_PORT1BYTECRD 0x168304
2573/* [RW 3] pci function number of queues 15-0 */
2574#define QM_REG_PQ2PCIFUNC_0 0x16e6bc
2575#define QM_REG_PQ2PCIFUNC_1 0x16e6c0
2576#define QM_REG_PQ2PCIFUNC_2 0x16e6c4
2577#define QM_REG_PQ2PCIFUNC_3 0x16e6c8
2578#define QM_REG_PQ2PCIFUNC_4 0x16e6cc
2579#define QM_REG_PQ2PCIFUNC_5 0x16e6d0
2580#define QM_REG_PQ2PCIFUNC_6 0x16e6d4
2581#define QM_REG_PQ2PCIFUNC_7 0x16e6d8
2582/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follows:
2583 ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
2584 bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
2585#define QM_REG_PTRTBL 0x168a00
2586/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as follows:
2587 ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
2588 bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
2589#define QM_REG_PTRTBL_EXT_A 0x16e200
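/* Illustrative decode sketch (not part of the register map): unpacking one
 * 54-bit pointer-table entry according to the layout above (written here for
 * clarity; not taken from the driver). 'e' is the entry as read over the WB
 * interface:
 *
 *	u64 e;
 *	u32 rd_ptr  = (e >> 30) & 0xffffff;	ptrtbl[53:30]
 *	u32 wr_ptr  = (e >> 6)  & 0xffffff;	ptrtbl[29:6]
 *	u32 rd_bnk0 = (e >> 4)  & 0x3;		ptrtbl[5:4]
 *	u32 rd_bnk1 = (e >> 2)  & 0x3;		ptrtbl[3:2]
 *	u32 wr_bnk  = e & 0x3;			ptrtbl[1:0]
 */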
2590/* [RW 2] Interrupt mask register #0 read/write */
2591#define QM_REG_QM_INT_MASK 0x168444
2592/* [R 2] Interrupt register #0 read */
2593#define QM_REG_QM_INT_STS 0x168438
2594/* [RW 12] Parity mask register #0 read/write */
2595#define QM_REG_QM_PRTY_MASK 0x168454
2596/* [R 12] Parity register #0 read */
2597#define QM_REG_QM_PRTY_STS 0x168448
2598/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
2599#define QM_REG_QSTATUS_HIGH 0x16802c
2600/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
2601#define QM_REG_QSTATUS_HIGH_EXT_A 0x16e408
2602/* [R 32] Current queues in pipeline: Queues from 0 to 31 */
2603#define QM_REG_QSTATUS_LOW 0x168028
2604/* [R 32] Current queues in pipeline: Queues from 64 to 95 */
2605#define QM_REG_QSTATUS_LOW_EXT_A 0x16e404
2606/* [R 24] The number of tasks queued for each queue; queues 63-0 */
2607#define QM_REG_QTASKCTR_0 0x168308
2608/* [R 24] The number of tasks queued for each queue; queues 127-64 */
2609#define QM_REG_QTASKCTR_EXT_A_0 0x16e584
2610/* [RW 4] Queue tied to VOQ */
2611#define QM_REG_QVOQIDX_0 0x1680f4
2612#define QM_REG_QVOQIDX_10 0x16811c
2613#define QM_REG_QVOQIDX_100 0x16e49c
2614#define QM_REG_QVOQIDX_101 0x16e4a0
2615#define QM_REG_QVOQIDX_102 0x16e4a4
2616#define QM_REG_QVOQIDX_103 0x16e4a8
2617#define QM_REG_QVOQIDX_104 0x16e4ac
2618#define QM_REG_QVOQIDX_105 0x16e4b0
2619#define QM_REG_QVOQIDX_106 0x16e4b4
2620#define QM_REG_QVOQIDX_107 0x16e4b8
2621#define QM_REG_QVOQIDX_108 0x16e4bc
2622#define QM_REG_QVOQIDX_109 0x16e4c0
2623#define QM_REG_QVOQIDX_11 0x168120
2624#define QM_REG_QVOQIDX_110 0x16e4c4
2625#define QM_REG_QVOQIDX_111 0x16e4c8
2626#define QM_REG_QVOQIDX_112 0x16e4cc
2627#define QM_REG_QVOQIDX_113 0x16e4d0
2628#define QM_REG_QVOQIDX_114 0x16e4d4
2629#define QM_REG_QVOQIDX_115 0x16e4d8
2630#define QM_REG_QVOQIDX_116 0x16e4dc
2631#define QM_REG_QVOQIDX_117 0x16e4e0
2632#define QM_REG_QVOQIDX_118 0x16e4e4
2633#define QM_REG_QVOQIDX_119 0x16e4e8
2634#define QM_REG_QVOQIDX_12 0x168124
2635#define QM_REG_QVOQIDX_120 0x16e4ec
2636#define QM_REG_QVOQIDX_121 0x16e4f0
2637#define QM_REG_QVOQIDX_122 0x16e4f4
2638#define QM_REG_QVOQIDX_123 0x16e4f8
2639#define QM_REG_QVOQIDX_124 0x16e4fc
2640#define QM_REG_QVOQIDX_125 0x16e500
2641#define QM_REG_QVOQIDX_126 0x16e504
2642#define QM_REG_QVOQIDX_127 0x16e508
2643#define QM_REG_QVOQIDX_13 0x168128
2644#define QM_REG_QVOQIDX_14 0x16812c
2645#define QM_REG_QVOQIDX_15 0x168130
2646#define QM_REG_QVOQIDX_16 0x168134
2647#define QM_REG_QVOQIDX_17 0x168138
2648#define QM_REG_QVOQIDX_21 0x168148
2649#define QM_REG_QVOQIDX_22 0x16814c
2650#define QM_REG_QVOQIDX_23 0x168150
2651#define QM_REG_QVOQIDX_24 0x168154
2652#define QM_REG_QVOQIDX_25 0x168158
2653#define QM_REG_QVOQIDX_26 0x16815c
2654#define QM_REG_QVOQIDX_27 0x168160
2655#define QM_REG_QVOQIDX_28 0x168164
2656#define QM_REG_QVOQIDX_29 0x168168
2657#define QM_REG_QVOQIDX_30 0x16816c
2658#define QM_REG_QVOQIDX_31 0x168170
2659#define QM_REG_QVOQIDX_32 0x168174
2660#define QM_REG_QVOQIDX_33 0x168178
2661#define QM_REG_QVOQIDX_34 0x16817c
2662#define QM_REG_QVOQIDX_35 0x168180
2663#define QM_REG_QVOQIDX_36 0x168184
2664#define QM_REG_QVOQIDX_37 0x168188
2665#define QM_REG_QVOQIDX_38 0x16818c
2666#define QM_REG_QVOQIDX_39 0x168190
2667#define QM_REG_QVOQIDX_40 0x168194
2668#define QM_REG_QVOQIDX_41 0x168198
2669#define QM_REG_QVOQIDX_42 0x16819c
2670#define QM_REG_QVOQIDX_43 0x1681a0
2671#define QM_REG_QVOQIDX_44 0x1681a4
2672#define QM_REG_QVOQIDX_45 0x1681a8
2673#define QM_REG_QVOQIDX_46 0x1681ac
2674#define QM_REG_QVOQIDX_47 0x1681b0
2675#define QM_REG_QVOQIDX_48 0x1681b4
2676#define QM_REG_QVOQIDX_49 0x1681b8
2677#define QM_REG_QVOQIDX_5 0x168108
2678#define QM_REG_QVOQIDX_50 0x1681bc
2679#define QM_REG_QVOQIDX_51 0x1681c0
2680#define QM_REG_QVOQIDX_52 0x1681c4
2681#define QM_REG_QVOQIDX_53 0x1681c8
2682#define QM_REG_QVOQIDX_54 0x1681cc
2683#define QM_REG_QVOQIDX_55 0x1681d0
2684#define QM_REG_QVOQIDX_56 0x1681d4
2685#define QM_REG_QVOQIDX_57 0x1681d8
2686#define QM_REG_QVOQIDX_58 0x1681dc
2687#define QM_REG_QVOQIDX_59 0x1681e0
2688#define QM_REG_QVOQIDX_6 0x16810c
2689#define QM_REG_QVOQIDX_60 0x1681e4
2690#define QM_REG_QVOQIDX_61 0x1681e8
2691#define QM_REG_QVOQIDX_62 0x1681ec
2692#define QM_REG_QVOQIDX_63 0x1681f0
2693#define QM_REG_QVOQIDX_64 0x16e40c
2694#define QM_REG_QVOQIDX_65 0x16e410
2695#define QM_REG_QVOQIDX_69 0x16e420
2696#define QM_REG_QVOQIDX_7 0x168110
2697#define QM_REG_QVOQIDX_70 0x16e424
2698#define QM_REG_QVOQIDX_71 0x16e428
2699#define QM_REG_QVOQIDX_72 0x16e42c
2700#define QM_REG_QVOQIDX_73 0x16e430
2701#define QM_REG_QVOQIDX_74 0x16e434
2702#define QM_REG_QVOQIDX_75 0x16e438
2703#define QM_REG_QVOQIDX_76 0x16e43c
2704#define QM_REG_QVOQIDX_77 0x16e440
2705#define QM_REG_QVOQIDX_78 0x16e444
2706#define QM_REG_QVOQIDX_79 0x16e448
2707#define QM_REG_QVOQIDX_8 0x168114
2708#define QM_REG_QVOQIDX_80 0x16e44c
2709#define QM_REG_QVOQIDX_81 0x16e450
2710#define QM_REG_QVOQIDX_85 0x16e460
2711#define QM_REG_QVOQIDX_86 0x16e464
2712#define QM_REG_QVOQIDX_87 0x16e468
2713#define QM_REG_QVOQIDX_88 0x16e46c
2714#define QM_REG_QVOQIDX_89 0x16e470
2715#define QM_REG_QVOQIDX_9 0x168118
2716#define QM_REG_QVOQIDX_90 0x16e474
2717#define QM_REG_QVOQIDX_91 0x16e478
2718#define QM_REG_QVOQIDX_92 0x16e47c
2719#define QM_REG_QVOQIDX_93 0x16e480
2720#define QM_REG_QVOQIDX_94 0x16e484
2721#define QM_REG_QVOQIDX_95 0x16e488
2722#define QM_REG_QVOQIDX_96 0x16e48c
2723#define QM_REG_QVOQIDX_97 0x16e490
2724#define QM_REG_QVOQIDX_98 0x16e494
2725#define QM_REG_QVOQIDX_99 0x16e498
2726/* [RW 1] Initialization bit command */
2727#define QM_REG_SOFT_RESET 0x168428
2728/* [RW 8] The credit cost per every task in the QM. A value per each VOQ */
2729#define QM_REG_TASKCRDCOST_0 0x16809c
2730#define QM_REG_TASKCRDCOST_1 0x1680a0
2731#define QM_REG_TASKCRDCOST_2 0x1680a4
2732#define QM_REG_TASKCRDCOST_4 0x1680ac
2733#define QM_REG_TASKCRDCOST_5 0x1680b0
2734/* [R 6] Keep the fill level of the fifo from write client 3 */
2735#define QM_REG_TQM_WRC_FIFOLVL 0x168010
2736/* [R 6] Keep the fill level of the fifo from write client 2 */
2737#define QM_REG_UQM_WRC_FIFOLVL 0x168008
2738/* [RC 32] Credit update error register */
2739#define QM_REG_VOQCRDERRREG 0x168408
2740/* [R 16] The credit value for each VOQ */
2741#define QM_REG_VOQCREDIT_0 0x1682d0
2742#define QM_REG_VOQCREDIT_1 0x1682d4
2743#define QM_REG_VOQCREDIT_4 0x1682e0
2744/* [RW 16] The credit value above which the QM is considered almost full */
2745#define QM_REG_VOQCREDITAFULLTHR 0x168090
2746/* [RW 16] The init and maximum credit for each VoQ */
2747#define QM_REG_VOQINITCREDIT_0 0x168060
2748#define QM_REG_VOQINITCREDIT_1 0x168064
2749#define QM_REG_VOQINITCREDIT_2 0x168068
2750#define QM_REG_VOQINITCREDIT_4 0x168070
2751#define QM_REG_VOQINITCREDIT_5 0x168074
2752/* [RW 1] The port to which the VOQ belongs */
2753#define QM_REG_VOQPORT_0 0x1682a0
2754#define QM_REG_VOQPORT_1 0x1682a4
2755#define QM_REG_VOQPORT_2 0x1682a8
2756/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2757#define QM_REG_VOQQMASK_0_LSB 0x168240
2758/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2759#define QM_REG_VOQQMASK_0_LSB_EXT_A 0x16e524
2760/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2761#define QM_REG_VOQQMASK_0_MSB 0x168244
2762/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2763#define QM_REG_VOQQMASK_0_MSB_EXT_A 0x16e528
2764/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2765#define QM_REG_VOQQMASK_10_LSB 0x168290
2766/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2767#define QM_REG_VOQQMASK_10_LSB_EXT_A 0x16e574
2768/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2769#define QM_REG_VOQQMASK_10_MSB 0x168294
2770/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2771#define QM_REG_VOQQMASK_10_MSB_EXT_A 0x16e578
2772/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2773#define QM_REG_VOQQMASK_11_LSB 0x168298
2774/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2775#define QM_REG_VOQQMASK_11_LSB_EXT_A 0x16e57c
2776/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2777#define QM_REG_VOQQMASK_11_MSB 0x16829c
2778/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2779#define QM_REG_VOQQMASK_11_MSB_EXT_A 0x16e580
2780/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2781#define QM_REG_VOQQMASK_1_LSB 0x168248
2782/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2783#define QM_REG_VOQQMASK_1_LSB_EXT_A 0x16e52c
2784/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2785#define QM_REG_VOQQMASK_1_MSB 0x16824c
2786/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2787#define QM_REG_VOQQMASK_1_MSB_EXT_A 0x16e530
2788/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2789#define QM_REG_VOQQMASK_2_LSB 0x168250
2790/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2791#define QM_REG_VOQQMASK_2_LSB_EXT_A 0x16e534
2792/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2793#define QM_REG_VOQQMASK_2_MSB 0x168254
2794/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2795#define QM_REG_VOQQMASK_2_MSB_EXT_A 0x16e538
2796/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2797#define QM_REG_VOQQMASK_3_LSB 0x168258
2798/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2799#define QM_REG_VOQQMASK_3_LSB_EXT_A 0x16e53c
2800/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2801#define QM_REG_VOQQMASK_3_MSB_EXT_A 0x16e540
2802/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2803#define QM_REG_VOQQMASK_4_LSB 0x168260
2804/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2805#define QM_REG_VOQQMASK_4_LSB_EXT_A 0x16e544
2806/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2807#define QM_REG_VOQQMASK_4_MSB 0x168264
2808/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2809#define QM_REG_VOQQMASK_4_MSB_EXT_A 0x16e548
2810/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2811#define QM_REG_VOQQMASK_5_LSB 0x168268
2812/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2813#define QM_REG_VOQQMASK_5_LSB_EXT_A 0x16e54c
2814/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2815#define QM_REG_VOQQMASK_5_MSB 0x16826c
2816/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2817#define QM_REG_VOQQMASK_5_MSB_EXT_A 0x16e550
2818/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2819#define QM_REG_VOQQMASK_6_LSB 0x168270
2820/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2821#define QM_REG_VOQQMASK_6_LSB_EXT_A 0x16e554
2822/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2823#define QM_REG_VOQQMASK_6_MSB 0x168274
2824/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2825#define QM_REG_VOQQMASK_6_MSB_EXT_A 0x16e558
2826/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2827#define QM_REG_VOQQMASK_7_LSB 0x168278
2828/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2829#define QM_REG_VOQQMASK_7_LSB_EXT_A 0x16e55c
2830/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2831#define QM_REG_VOQQMASK_7_MSB 0x16827c
2832/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2833#define QM_REG_VOQQMASK_7_MSB_EXT_A 0x16e560
2834/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2835#define QM_REG_VOQQMASK_8_LSB 0x168280
2836/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2837#define QM_REG_VOQQMASK_8_LSB_EXT_A 0x16e564
2838/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2839#define QM_REG_VOQQMASK_8_MSB 0x168284
2840/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2841#define QM_REG_VOQQMASK_8_MSB_EXT_A 0x16e568
2842/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2843#define QM_REG_VOQQMASK_9_LSB 0x168288
2844/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2845#define QM_REG_VOQQMASK_9_LSB_EXT_A 0x16e56c
2846/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2847#define QM_REG_VOQQMASK_9_MSB_EXT_A 0x16e570
2848/* [RW 32] Wrr weights */
2849#define QM_REG_WRRWEIGHTS_0 0x16880c
2850#define QM_REG_WRRWEIGHTS_1 0x168810
2851#define QM_REG_WRRWEIGHTS_10 0x168814
2852#define QM_REG_WRRWEIGHTS_11 0x168818
2853#define QM_REG_WRRWEIGHTS_12 0x16881c
2854#define QM_REG_WRRWEIGHTS_13 0x168820
2855#define QM_REG_WRRWEIGHTS_14 0x168824
2856#define QM_REG_WRRWEIGHTS_15 0x168828
2857#define QM_REG_WRRWEIGHTS_16 0x16e000
2858#define QM_REG_WRRWEIGHTS_17 0x16e004
2859#define QM_REG_WRRWEIGHTS_18 0x16e008
2860#define QM_REG_WRRWEIGHTS_19 0x16e00c
2861#define QM_REG_WRRWEIGHTS_2 0x16882c
2862#define QM_REG_WRRWEIGHTS_20 0x16e010
2863#define QM_REG_WRRWEIGHTS_21 0x16e014
2864#define QM_REG_WRRWEIGHTS_22 0x16e018
2865#define QM_REG_WRRWEIGHTS_23 0x16e01c
2866#define QM_REG_WRRWEIGHTS_24 0x16e020
2867#define QM_REG_WRRWEIGHTS_25 0x16e024
2868#define QM_REG_WRRWEIGHTS_26 0x16e028
2869#define QM_REG_WRRWEIGHTS_27 0x16e02c
2870#define QM_REG_WRRWEIGHTS_28 0x16e030
2871#define QM_REG_WRRWEIGHTS_29 0x16e034
2872#define QM_REG_WRRWEIGHTS_3 0x168830
2873#define QM_REG_WRRWEIGHTS_30 0x16e038
2874#define QM_REG_WRRWEIGHTS_31 0x16e03c
2875#define QM_REG_WRRWEIGHTS_4 0x168834
2876#define QM_REG_WRRWEIGHTS_5 0x168838
2877#define QM_REG_WRRWEIGHTS_6 0x16883c
2878#define QM_REG_WRRWEIGHTS_7 0x168840
2879#define QM_REG_WRRWEIGHTS_8 0x168844
2880#define QM_REG_WRRWEIGHTS_9 0x168848
2881/* [R 6] Keep the fill level of the fifo from write client 1 */
2882#define QM_REG_XQM_WRC_FIFOLVL 0x168000
2883#define SRC_REG_COUNTFREE0 0x40500
2884/* [RW 1] If clear the searcher is compatible with E1 A0 - supports only two
2885 ports. If set the searcher supports 8 functions. */
2886#define SRC_REG_E1HMF_ENABLE 0x404cc
2887#define SRC_REG_FIRSTFREE0 0x40510
2888#define SRC_REG_KEYRSS0_0 0x40408
2889#define SRC_REG_KEYRSS0_7 0x40424
2890#define SRC_REG_KEYRSS1_9 0x40454
2891#define SRC_REG_KEYSEARCH_0 0x40458
2892#define SRC_REG_KEYSEARCH_1 0x4045c
2893#define SRC_REG_KEYSEARCH_2 0x40460
2894#define SRC_REG_KEYSEARCH_3 0x40464
2895#define SRC_REG_KEYSEARCH_4 0x40468
2896#define SRC_REG_KEYSEARCH_5 0x4046c
2897#define SRC_REG_KEYSEARCH_6 0x40470
2898#define SRC_REG_KEYSEARCH_7 0x40474
2899#define SRC_REG_KEYSEARCH_8 0x40478
2900#define SRC_REG_KEYSEARCH_9 0x4047c
2901#define SRC_REG_LASTFREE0 0x40530
2902#define SRC_REG_NUMBER_HASH_BITS0 0x40400
2903/* [RW 1] Reset internal state machines. */
2904#define SRC_REG_SOFT_RST 0x4049c
2905/* [R 3] Interrupt register #0 read */
2906#define SRC_REG_SRC_INT_STS 0x404ac
2907/* [RW 3] Parity mask register #0 read/write */
2908#define SRC_REG_SRC_PRTY_MASK 0x404c8
2909/* [R 3] Parity register #0 read */
2910#define SRC_REG_SRC_PRTY_STS 0x404bc
2911/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
2912#define TCM_REG_CAM_OCCUP 0x5017c
2913/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
2914 disregarded; valid output is deasserted; all other signals are treated as
2915 usual; if 1 - normal activity. */
2916#define TCM_REG_CDU_AG_RD_IFEN 0x50034
2917/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
2918 are disregarded; all other signals are treated as usual; if 1 - normal
2919 activity. */
2920#define TCM_REG_CDU_AG_WR_IFEN 0x50030
2921/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
2922 disregarded; valid output is deasserted; all other signals are treated as
2923 usual; if 1 - normal activity. */
2924#define TCM_REG_CDU_SM_RD_IFEN 0x5003c
2925/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
2926 input is disregarded; all other signals are treated as usual; if 1 -
2927 normal activity. */
2928#define TCM_REG_CDU_SM_WR_IFEN 0x50038
2929/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes
2930 the initial credit value; read returns the current value of the credit
2931 counter. Must be initialized to 1 at start-up. */
2932#define TCM_REG_CFC_INIT_CRD 0x50204
2933/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
2934 weight 8 (the most prioritised); 1 stands for weight 1(least
2935 prioritised); 2 stands for weight 2; etc. */
2936#define TCM_REG_CP_WEIGHT 0x500c0
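As an illustration (not from the driver source), every 3-bit WRR weight field in this block uses the encoding spelled out above: 0 means weight 8, the most prioritised, and any non-zero value is taken as the weight itself. A minimal sketch of that decoding, using nothing beyond the comment text:

/* Illustrative only: decode a 3-bit WRR weight field as documented in the
 * comments above (0 -> weight 8; otherwise the field value is the weight,
 * so 1 -> 1, 2 -> 2, etc.).
 */
static inline unsigned int wrr_field_to_weight(unsigned int field)
{
	return field ? field : 8;
}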
2937/* [RW 1] Input csem Interface enable. If 0 - the valid input is
2938 disregarded; acknowledge output is deasserted; all other signals are
2939 treated as usual; if 1 - normal activity. */
2940#define TCM_REG_CSEM_IFEN 0x5002c
2941/* [RC 1] Message length mismatch (relative to last indication) at the In#9
2942 interface. */
2943#define TCM_REG_CSEM_LENGTH_MIS 0x50174
2944/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
2945 weight 8 (the most prioritised); 1 stands for weight 1(least
2946 prioritised); 2 stands for weight 2; etc. */
2947#define TCM_REG_CSEM_WEIGHT 0x500bc
2948/* [RW 8] The Event ID in case of ErrorFlg is set in the input message. */
2949#define TCM_REG_ERR_EVNT_ID 0x500a0
2950/* [RW 28] The CM erroneous header for QM and Timers formatting. */
2951#define TCM_REG_ERR_TCM_HDR 0x5009c
2952/* [RW 8] The Event ID for Timers expiration. */
2953#define TCM_REG_EXPR_EVNT_ID 0x500a4
2954/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write
2955 writes the initial credit value; read returns the current value of the
2956 credit counter. Must be initialized to 64 at start-up. */
2957#define TCM_REG_FIC0_INIT_CRD 0x5020c
2958/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write
2959 writes the initial credit value; read returns the current value of the
2960 credit counter. Must be initialized to 64 at start-up. */
2961#define TCM_REG_FIC1_INIT_CRD 0x50210
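As an illustration (not from the driver source), the start-up values called for above could be programmed as follows; REG_WR(bp, reg, val) and struct bnx2x are assumed to be the driver's usual register-write helper and device context:

/* Sketch under the assumptions above: write the documented start-up
 * credit values for the TCM output interfaces.
 */
static void tcm_init_output_credits(struct bnx2x *bp)
{
	REG_WR(bp, TCM_REG_CFC_INIT_CRD, 1);	/* "must be initialized to 1" */
	REG_WR(bp, TCM_REG_FIC0_INIT_CRD, 64);	/* "must be initialized to 64" */
	REG_WR(bp, TCM_REG_FIC1_INIT_CRD, 64);	/* "must be initialized to 64" */
}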
2962/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
2963 - strict priority defined by ~tcm_registers_gr_ag_pr.gr_ag_pr;
2964 ~tcm_registers_gr_ld0_pr.gr_ld0_pr and
2965 ~tcm_registers_gr_ld1_pr.gr_ld1_pr. */
2966#define TCM_REG_GR_ARB_TYPE 0x50114
2967/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
2968 highest priority is 3. It is supposed that the Store channel is the
2969 complement of the other 3 groups. */
2970#define TCM_REG_GR_LD0_PR 0x5011c
2971/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
2972 highest priority is 3. It is supposed that the Store channel is the
2973 complement of the other 3 groups. */
2974#define TCM_REG_GR_LD1_PR 0x50120
2975/* [RW 4] The number of double REG-pairs; loaded from the STORM context and
2976 sent to STORM; for a specific connection type. The double REG-pairs are
2977 used to align to STORM context row size of 128 bits. The offset of these
2978 data in the STORM context is always 0. Index _i stands for the connection
2979 type (one of 16). */
2980#define TCM_REG_N_SM_CTX_LD_0 0x50050
2981#define TCM_REG_N_SM_CTX_LD_1 0x50054
2982#define TCM_REG_N_SM_CTX_LD_2 0x50058
2983#define TCM_REG_N_SM_CTX_LD_3 0x5005c
2984#define TCM_REG_N_SM_CTX_LD_4 0x50060
2985#define TCM_REG_N_SM_CTX_LD_5 0x50064
2986/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
2987 acknowledge output is deasserted; all other signals are treated as usual;
2988 if 1 - normal activity. */
2989#define TCM_REG_PBF_IFEN 0x50024
2990/* [RC 1] Message length mismatch (relative to last indication) at the In#7
2991 interface. */
2992#define TCM_REG_PBF_LENGTH_MIS 0x5016c
2993/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
2994 weight 8 (the most prioritised); 1 stands for weight 1(least
2995 prioritised); 2 stands for weight 2; etc. */
2996#define TCM_REG_PBF_WEIGHT 0x500b4
2997#define TCM_REG_PHYS_QNUM0_0 0x500e0
2998#define TCM_REG_PHYS_QNUM0_1 0x500e4
2999#define TCM_REG_PHYS_QNUM1_0 0x500e8
3000#define TCM_REG_PHYS_QNUM1_1 0x500ec
3001#define TCM_REG_PHYS_QNUM2_0 0x500f0
3002#define TCM_REG_PHYS_QNUM2_1 0x500f4
3003#define TCM_REG_PHYS_QNUM3_0 0x500f8
3004#define TCM_REG_PHYS_QNUM3_1 0x500fc
3005/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded;
3006 acknowledge output is deasserted; all other signals are treated as usual;
3007 if 1 - normal activity. */
3008#define TCM_REG_PRS_IFEN 0x50020
3009/* [RC 1] Message length mismatch (relative to last indication) at the In#6
3010 interface. */
3011#define TCM_REG_PRS_LENGTH_MIS 0x50168
3012/* [RW 3] The weight of the input prs in the WRR mechanism. 0 stands for
3013 weight 8 (the most prioritised); 1 stands for weight 1(least
3014 prioritised); 2 stands for weight 2; etc. */
3015#define TCM_REG_PRS_WEIGHT 0x500b0
3016/* [RW 8] The Event ID for Timers formatting in case of stop done. */
3017#define TCM_REG_STOP_EVNT_ID 0x500a8
3018/* [RC 1] Message length mismatch (relative to last indication) at the STORM
3019 interface. */
3020#define TCM_REG_STORM_LENGTH_MIS 0x50160
3021/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
3022 disregarded; acknowledge output is deasserted; all other signals are
3023 treated as usual; if 1 - normal activity. */
3024#define TCM_REG_STORM_TCM_IFEN 0x50010
3025/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
3026 weight 8 (the most prioritised); 1 stands for weight 1(least
3027 prioritised); 2 stands for weight 2; etc. */
3028#define TCM_REG_STORM_WEIGHT 0x500ac
3029/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
3030 acknowledge output is deasserted; all other signals are treated as usual;
3031 if 1 - normal activity. */
3032#define TCM_REG_TCM_CFC_IFEN 0x50040
3033/* [RW 11] Interrupt mask register #0 read/write */
3034#define TCM_REG_TCM_INT_MASK 0x501dc
3035/* [R 11] Interrupt register #0 read */
3036#define TCM_REG_TCM_INT_STS 0x501d0
3037/* [R 27] Parity register #0 read */
3038#define TCM_REG_TCM_PRTY_STS 0x501e0
3039/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
3040 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
3041 Is used to determine the number of the AG context REG-pairs written back;
3042 when the input message Reg1WbFlg isn't set. */
3043#define TCM_REG_TCM_REG0_SZ 0x500d8
3044/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
3045 disregarded; valid is deasserted; all other signals are treated as usual;
3046 if 1 - normal activity. */
3047#define TCM_REG_TCM_STORM0_IFEN 0x50004
3048/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
3049 disregarded; valid is deasserted; all other signals are treated as usual;
3050 if 1 - normal activity. */
3051#define TCM_REG_TCM_STORM1_IFEN 0x50008
3052/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
3053 disregarded; valid is deasserted; all other signals are treated as usual;
3054 if 1 - normal activity. */
3055#define TCM_REG_TCM_TQM_IFEN 0x5000c
3056/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */
3057#define TCM_REG_TCM_TQM_USE_Q 0x500d4
3058/* [RW 28] The CM header for Timers expiration command. */
3059#define TCM_REG_TM_TCM_HDR 0x50098
3060/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
3061 disregarded; acknowledge output is deasserted; all other signals are
3062 treated as usual; if 1 - normal activity. */
3063#define TCM_REG_TM_TCM_IFEN 0x5001c
3064/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
3065 weight 8 (the most prioritised); 1 stands for weight 1(least
3066 prioritised); 2 stands for weight 2; etc. */
3067#define TCM_REG_TM_WEIGHT 0x500d0
3068/* [RW 6] QM output initial credit. Max credit available - 32.Write writes
3069 the initial credit value; read returns the current value of the credit
3070 counter. Must be initialized to 32 at start-up. */
3071#define TCM_REG_TQM_INIT_CRD 0x5021c
3072/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
3073 stands for weight 8 (the most prioritised); 1 stands for weight 1(least
3074 prioritised); 2 stands for weight 2; etc. */
3075#define TCM_REG_TQM_P_WEIGHT 0x500c8
3076/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
3077 stands for weight 8 (the most prioritised); 1 stands for weight 1(least
3078 prioritised); 2 stands for weight 2; etc. */
3079#define TCM_REG_TQM_S_WEIGHT 0x500cc
3080/* [RW 28] The CM header value for QM request (primary). */
3081#define TCM_REG_TQM_TCM_HDR_P 0x50090
3082/* [RW 28] The CM header value for QM request (secondary). */
3083#define TCM_REG_TQM_TCM_HDR_S 0x50094
3084/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
3085 acknowledge output is deasserted; all other signals are treated as usual;
3086 if 1 - normal activity. */
3087#define TCM_REG_TQM_TCM_IFEN 0x50014
3088/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
3089 acknowledge output is deasserted; all other signals are treated as usual;
3090 if 1 - normal activity. */
3091#define TCM_REG_TSDM_IFEN 0x50018
3092/* [RC 1] Message length mismatch (relative to last indication) at the SDM
3093 interface. */
3094#define TCM_REG_TSDM_LENGTH_MIS 0x50164
3095/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
3096 weight 8 (the most prioritised); 1 stands for weight 1(least
3097 prioritised); 2 stands for weight 2; etc. */
3098#define TCM_REG_TSDM_WEIGHT 0x500c4
3099/* [RW 1] Input usem Interface enable. If 0 - the valid input is
3100 disregarded; acknowledge output is deasserted; all other signals are
3101 treated as usual; if 1 - normal activity. */
3102#define TCM_REG_USEM_IFEN 0x50028
3103/* [RC 1] Message length mismatch (relative to last indication) at the In#8
3104 interface. */
3105#define TCM_REG_USEM_LENGTH_MIS 0x50170
3106/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
3107 weight 8 (the most prioritised); 1 stands for weight 1(least
3108 prioritised); 2 stands for weight 2; etc. */
3109#define TCM_REG_USEM_WEIGHT 0x500b8
3110/* [RW 21] Indirect access to the descriptor table of the XX protection
3111 mechanism. The fields are: [5:0] - length of the message; [15:6] - message
3112 pointer; [20:16] - next pointer. */
3113#define TCM_REG_XX_DESCR_TABLE 0x50280
3114#define TCM_REG_XX_DESCR_TABLE_SIZE 32
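As a rough illustration (not part of the driver), the 21-bit descriptor word layout given above unpacks as shown below; the struct and field names are invented for the sketch:

/* Sketch only: unpack one TCM XX-protection descriptor entry using the
 * documented layout ([5:0] message length, [15:6] message pointer,
 * [20:16] next pointer). Names are illustrative, not from the driver.
 */
struct tcm_xx_descr {
	u32 msg_len;	/* bits [5:0]   */
	u32 msg_ptr;	/* bits [15:6]  */
	u32 next_ptr;	/* bits [20:16] */
};

static inline void tcm_xx_descr_unpack(u32 val, struct tcm_xx_descr *d)
{
	d->msg_len  = val & 0x3f;
	d->msg_ptr  = (val >> 6) & 0x3ff;
	d->next_ptr = (val >> 16) & 0x1f;
}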
3115/* [R 6] Use to read the value of XX protection Free counter. */
3116#define TCM_REG_XX_FREE 0x50178
3117/* [RW 6] Initial value for the credit counter; responsible for fulfilling
3118 of the Input Stage XX protection buffer by the XX protection pending
3119 messages. Max credit available - 127.Write writes the initial credit
3120 value; read returns the current value of the credit counter. Must be
3121 initialized to 19 at start-up. */
3122#define TCM_REG_XX_INIT_CRD 0x50220
3123/* [RW 6] Maximum link list size (messages locked) per connection in the XX
3124 protection. */
3125#define TCM_REG_XX_MAX_LL_SZ 0x50044
3126/* [RW 6] The maximum number of pending messages; which may be stored in XX
3127 protection. ~tcm_registers_xx_free.xx_free is read on read. */
3128#define TCM_REG_XX_MSG_NUM 0x50224
3129/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
3130#define TCM_REG_XX_OVFL_EVNT_ID 0x50048
3131/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
3132 The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
3133 header pointer. */
3134#define TCM_REG_XX_TABLE 0x50240
3135/* [RW 4] Load value for cfc ac credit cnt. */
3136#define TM_REG_CFC_AC_CRDCNT_VAL 0x164208
3137/* [RW 4] Load value for cfc cld credit cnt. */
3138#define TM_REG_CFC_CLD_CRDCNT_VAL 0x164210
3139/* [RW 8] Client0 context region. */
3140#define TM_REG_CL0_CONT_REGION 0x164030
3141/* [RW 8] Client1 context region. */
3142#define TM_REG_CL1_CONT_REGION 0x164034
3143/* [RW 8] Client2 context region. */
3144#define TM_REG_CL2_CONT_REGION 0x164038
3145/* [RW 2] Client in High priority client number. */
3146#define TM_REG_CLIN_PRIOR0_CLIENT 0x164024
3147/* [RW 4] Load value for clout0 cred cnt. */
3148#define TM_REG_CLOUT_CRDCNT0_VAL 0x164220
3149/* [RW 4] Load value for clout1 cred cnt. */
3150#define TM_REG_CLOUT_CRDCNT1_VAL 0x164228
3151/* [RW 4] Load value for clout2 cred cnt. */
3152#define TM_REG_CLOUT_CRDCNT2_VAL 0x164230
3153/* [RW 1] Enable client0 input. */
3154#define TM_REG_EN_CL0_INPUT 0x164008
3155/* [RW 1] Enable client1 input. */
3156#define TM_REG_EN_CL1_INPUT 0x16400c
3157/* [RW 1] Enable client2 input. */
3158#define TM_REG_EN_CL2_INPUT 0x164010
3159#define TM_REG_EN_LINEAR0_TIMER 0x164014
3160/* [RW 1] Enable real time counter. */
3161#define TM_REG_EN_REAL_TIME_CNT 0x1640d8
3162/* [RW 1] Enable for Timers state machines. */
3163#define TM_REG_EN_TIMERS 0x164000
3164/* [RW 4] Load value for expiration credit cnt. CFC max number of
3165 outstanding load requests for timers (expiration) context loading. */
3166#define TM_REG_EXP_CRDCNT_VAL 0x164238
3167/* [RW 32] Linear0 logic address. */
3168#define TM_REG_LIN0_LOGIC_ADDR 0x164240
3169/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
3170#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048
3171/* [WB 64] Linear0 phy address. */
3172#define TM_REG_LIN0_PHY_ADDR 0x164270
3173/* [RW 1] Linear0 physical address valid. */
3174#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248
3175#define TM_REG_LIN0_SCAN_ON 0x1640d0
3176/* [RW 24] Linear0 array scan timeout. */
3177#define TM_REG_LIN0_SCAN_TIME 0x16403c
3178/* [RW 32] Linear1 logic address. */
3179#define TM_REG_LIN1_LOGIC_ADDR 0x164250
3180/* [WB 64] Linear1 phy address. */
3181#define TM_REG_LIN1_PHY_ADDR 0x164280
3182/* [RW 1] Linear1 physical address valid. */
3183#define TM_REG_LIN1_PHY_ADDR_VALID 0x164258
3184/* [RW 6] Linear timer set_clear fifo threshold. */
3185#define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR 0x164070
3186/* [RW 2] Load value for pci arbiter credit cnt. */
3187#define TM_REG_PCIARB_CRDCNT_VAL 0x164260
3188/* [RW 20] The amount of hardware cycles for each timer tick. */
3189#define TM_REG_TIMER_TICK_SIZE 0x16401c
3190/* [RW 8] Timers Context region. */
3191#define TM_REG_TM_CONTEXT_REGION 0x164044
3192/* [RW 1] Interrupt mask register #0 read/write */
3193#define TM_REG_TM_INT_MASK 0x1640fc
3194/* [R 1] Interrupt register #0 read */
3195#define TM_REG_TM_INT_STS 0x1640f0
3196/* [RW 8] The event id for aggregated interrupt 0 */
3197#define TSDM_REG_AGG_INT_EVENT_0 0x42038
3198#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
3199#define TSDM_REG_AGG_INT_EVENT_2 0x42040
3200#define TSDM_REG_AGG_INT_EVENT_3 0x42044
3201#define TSDM_REG_AGG_INT_EVENT_4 0x42048
3202/* [RW 1] The T bit for aggregated interrupt 0 */
3203#define TSDM_REG_AGG_INT_T_0 0x420b8
3204#define TSDM_REG_AGG_INT_T_1 0x420bc
3205/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
3206#define TSDM_REG_CFC_RSP_START_ADDR 0x42008
3207/* [RW 16] The maximum value of the completion counter #0 */
3208#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c
3209/* [RW 16] The maximum value of the completion counter #1 */
3210#define TSDM_REG_CMP_COUNTER_MAX1 0x42020
3211/* [RW 16] The maximum value of the completion counter #2 */
3212#define TSDM_REG_CMP_COUNTER_MAX2 0x42024
3213/* [RW 16] The maximum value of the completion counter #3 */
3214#define TSDM_REG_CMP_COUNTER_MAX3 0x42028
3215/* [RW 13] The start address in the internal RAM for the completion
3216 counters. */
3217#define TSDM_REG_CMP_COUNTER_START_ADDR 0x4200c
3218#define TSDM_REG_ENABLE_IN1 0x42238
3219#define TSDM_REG_ENABLE_IN2 0x4223c
3220#define TSDM_REG_ENABLE_OUT1 0x42240
3221#define TSDM_REG_ENABLE_OUT2 0x42244
3222/* [RW 4] The initial number of messages that can be sent to the pxp control
3223 interface without receiving any ACK. */
3224#define TSDM_REG_INIT_CREDIT_PXP_CTRL 0x424bc
3225/* [ST 32] The number of ACK after placement messages received */
3226#define TSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x4227c
3227/* [ST 32] The number of packet end messages received from the parser */
3228#define TSDM_REG_NUM_OF_PKT_END_MSG 0x42274
3229/* [ST 32] The number of requests received from the pxp async if */
3230#define TSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x42278
3231/* [ST 32] The number of commands received in queue 0 */
3232#define TSDM_REG_NUM_OF_Q0_CMD 0x42248
3233/* [ST 32] The number of commands received in queue 10 */
3234#define TSDM_REG_NUM_OF_Q10_CMD 0x4226c
3235/* [ST 32] The number of commands received in queue 11 */
3236#define TSDM_REG_NUM_OF_Q11_CMD 0x42270
3237/* [ST 32] The number of commands received in queue 1 */
3238#define TSDM_REG_NUM_OF_Q1_CMD 0x4224c
3239/* [ST 32] The number of commands received in queue 3 */
3240#define TSDM_REG_NUM_OF_Q3_CMD 0x42250
3241/* [ST 32] The number of commands received in queue 4 */
3242#define TSDM_REG_NUM_OF_Q4_CMD 0x42254
3243/* [ST 32] The number of commands received in queue 5 */
3244#define TSDM_REG_NUM_OF_Q5_CMD 0x42258
3245/* [ST 32] The number of commands received in queue 6 */
3246#define TSDM_REG_NUM_OF_Q6_CMD 0x4225c
3247/* [ST 32] The number of commands received in queue 7 */
3248#define TSDM_REG_NUM_OF_Q7_CMD 0x42260
3249/* [ST 32] The number of commands received in queue 8 */
3250#define TSDM_REG_NUM_OF_Q8_CMD 0x42264
3251/* [ST 32] The number of commands received in queue 9 */
3252#define TSDM_REG_NUM_OF_Q9_CMD 0x42268
3253/* [RW 13] The start address in the internal RAM for the packet end message */
3254#define TSDM_REG_PCK_END_MSG_START_ADDR 0x42014
3255/* [RW 13] The start address in the internal RAM for queue counters */
3256#define TSDM_REG_Q_COUNTER_START_ADDR 0x42010
3257/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
3258#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x42548
3259/* [R 1] parser fifo empty in sdm_sync block */
3260#define TSDM_REG_SYNC_PARSER_EMPTY 0x42550
3261/* [R 1] parser serial fifo empty in sdm_sync block */
3262#define TSDM_REG_SYNC_SYNC_EMPTY 0x42558
3263/* [RW 32] Tick for timer counter. Applicable only when
3264 ~tsdm_registers_timer_tick_enable.timer_tick_enable =1 */
3265#define TSDM_REG_TIMER_TICK 0x42000
3266/* [RW 32] Interrupt mask register #0 read/write */
3267#define TSDM_REG_TSDM_INT_MASK_0 0x4229c
3268#define TSDM_REG_TSDM_INT_MASK_1 0x422ac
3269/* [R 32] Interrupt register #0 read */
3270#define TSDM_REG_TSDM_INT_STS_0 0x42290
3271#define TSDM_REG_TSDM_INT_STS_1 0x422a0
3272/* [RW 11] Parity mask register #0 read/write */
3273#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
3274/* [R 11] Parity register #0 read */
3275#define TSDM_REG_TSDM_PRTY_STS 0x422b0
3276/* [RW 5] The number of time_slots in the arbitration cycle */
3277#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
3278/* [RW 3] The source that is associated with arbitration element 0. Source
3279 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3280 sleeping thread with priority 1; 4- sleeping thread with priority 2 */
3281#define TSEM_REG_ARB_ELEMENT0 0x180020
3282/* [RW 3] The source that is associated with arbitration element 1. Source
3283 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3284 sleeping thread with priority 1; 4- sleeping thread with priority 2.
3285 Could not be equal to register ~tsem_registers_arb_element0.arb_element0 */
3286#define TSEM_REG_ARB_ELEMENT1 0x180024
3287/* [RW 3] The source that is associated with arbitration element 2. Source
3288 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3289 sleeping thread with priority 1; 4- sleeping thread with priority 2.
3290 Could not be equal to register ~tsem_registers_arb_element0.arb_element0
3291 and ~tsem_registers_arb_element1.arb_element1 */
3292#define TSEM_REG_ARB_ELEMENT2 0x180028
3293/* [RW 3] The source that is associated with arbitration element 3. Source
3294 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3295 sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
3296 not be equal to register ~tsem_registers_arb_element0.arb_element0 and
3297 ~tsem_registers_arb_element1.arb_element1 and
3298 ~tsem_registers_arb_element2.arb_element2 */
3299#define TSEM_REG_ARB_ELEMENT3 0x18002c
3300/* [RW 3] The source that is associated with arbitration element 4. Source
3301 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3302 sleeping thread with priority 1; 4- sleeping thread with priority 2.
3303 Could not be equal to register ~tsem_registers_arb_element0.arb_element0
3304 and ~tsem_registers_arb_element1.arb_element1 and
3305 ~tsem_registers_arb_element2.arb_element2 and
3306 ~tsem_registers_arb_element3.arb_element3 */
3307#define TSEM_REG_ARB_ELEMENT4 0x180030
3308#define TSEM_REG_ENABLE_IN 0x1800a4
3309#define TSEM_REG_ENABLE_OUT 0x1800a8
3310/* [RW 32] This address space contains all registers and memories that are
3311 placed in SEM_FAST block. The SEM_FAST registers are described in
3312 appendix B. In order to access the sem_fast registers the base address
3313 ~fast_memory.fast_memory should be added to each sem_fast register offset. */
3314#define TSEM_REG_FAST_MEMORY 0x1a0000
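As a small illustration (not from the driver), the addressing rule in the comment above amounts to adding the fast-memory base to a SEM_FAST register offset; sem_fast_offset here is a placeholder for any such offset:

/* Sketch only: form the address of a TSEM SEM_FAST register by adding its
 * offset to the ~fast_memory base, as the comment above describes.
 */
static inline u32 tsem_fast_reg_addr(u32 sem_fast_offset)
{
	return TSEM_REG_FAST_MEMORY + sem_fast_offset;
}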
3315/* [RW 1] Disables input messages from FIC0 May be updated during run_time
3316 by the microcode */
3317#define TSEM_REG_FIC0_DISABLE 0x180224
3318/* [RW 1] Disables input messages from FIC1 May be updated during run_time
3319 by the microcode */
3320#define TSEM_REG_FIC1_DISABLE 0x180234
3321/* [RW 15] Interrupt table Read and write access to it is not possible in
3322 the middle of the work */
3323#define TSEM_REG_INT_TABLE 0x180400
3324/* [ST 24] Statistics register. The number of messages that entered through
3325 FIC0 */
3326#define TSEM_REG_MSG_NUM_FIC0 0x180000
3327/* [ST 24] Statistics register. The number of messages that entered through
3328 FIC1 */
3329#define TSEM_REG_MSG_NUM_FIC1 0x180004
3330/* [ST 24] Statistics register. The number of messages that were sent to
3331 FOC0 */
3332#define TSEM_REG_MSG_NUM_FOC0 0x180008
3333/* [ST 24] Statistics register. The number of messages that were sent to
3334 FOC1 */
3335#define TSEM_REG_MSG_NUM_FOC1 0x18000c
3336/* [ST 24] Statistics register. The number of messages that were sent to
3337 FOC2 */
3338#define TSEM_REG_MSG_NUM_FOC2 0x180010
3339/* [ST 24] Statistics register. The number of messages that were sent to
3340 FOC3 */
3341#define TSEM_REG_MSG_NUM_FOC3 0x180014
3342/* [RW 1] Disables input messages from the passive buffer May be updated
3343 during run_time by the microcode */
3344#define TSEM_REG_PAS_DISABLE 0x18024c
3345/* [WB 128] Debug only. Passive buffer memory */
3346#define TSEM_REG_PASSIVE_BUFFER 0x181000
3347/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
3348#define TSEM_REG_PRAM 0x1c0000
3349/* [R 8] Valid sleeping threads indication have bit per thread */
3350#define TSEM_REG_SLEEP_THREADS_VALID 0x18026c
3351/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
3352#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
3353/* [RW 8] List of free threads . There is a bit per thread. */
3354#define TSEM_REG_THREADS_LIST 0x1802e4
3355/* [RW 3] The arbitration scheme of time_slot 0 */
3356#define TSEM_REG_TS_0_AS 0x180038
3357/* [RW 3] The arbitration scheme of time_slot 10 */
3358#define TSEM_REG_TS_10_AS 0x180060
3359/* [RW 3] The arbitration scheme of time_slot 11 */
3360#define TSEM_REG_TS_11_AS 0x180064
3361/* [RW 3] The arbitration scheme of time_slot 12 */
3362#define TSEM_REG_TS_12_AS 0x180068
3363/* [RW 3] The arbitration scheme of time_slot 13 */
3364#define TSEM_REG_TS_13_AS 0x18006c
3365/* [RW 3] The arbitration scheme of time_slot 14 */
3366#define TSEM_REG_TS_14_AS 0x180070
3367/* [RW 3] The arbitration scheme of time_slot 15 */
3368#define TSEM_REG_TS_15_AS 0x180074
3369/* [RW 3] The arbitration scheme of time_slot 16 */
3370#define TSEM_REG_TS_16_AS 0x180078
3371/* [RW 3] The arbitration scheme of time_slot 17 */
3372#define TSEM_REG_TS_17_AS 0x18007c
3373/* [RW 3] The arbitration scheme of time_slot 18 */
3374#define TSEM_REG_TS_18_AS 0x180080
3375/* [RW 3] The arbitration scheme of time_slot 1 */
3376#define TSEM_REG_TS_1_AS 0x18003c
3377/* [RW 3] The arbitration scheme of time_slot 2 */
3378#define TSEM_REG_TS_2_AS 0x180040
3379/* [RW 3] The arbitration scheme of time_slot 3 */
3380#define TSEM_REG_TS_3_AS 0x180044
3381/* [RW 3] The arbitration scheme of time_slot 4 */
3382#define TSEM_REG_TS_4_AS 0x180048
3383/* [RW 3] The arbitration scheme of time_slot 5 */
3384#define TSEM_REG_TS_5_AS 0x18004c
3385/* [RW 3] The arbitration scheme of time_slot 6 */
3386#define TSEM_REG_TS_6_AS 0x180050
3387/* [RW 3] The arbitration scheme of time_slot 7 */
3388#define TSEM_REG_TS_7_AS 0x180054
3389/* [RW 3] The arbitration scheme of time_slot 8 */
3390#define TSEM_REG_TS_8_AS 0x180058
3391/* [RW 3] The arbitration scheme of time_slot 9 */
3392#define TSEM_REG_TS_9_AS 0x18005c
3393/* [RW 32] Interrupt mask register #0 read/write */
3394#define TSEM_REG_TSEM_INT_MASK_0 0x180100
3395#define TSEM_REG_TSEM_INT_MASK_1 0x180110
3396/* [R 32] Interrupt register #0 read */
3397#define TSEM_REG_TSEM_INT_STS_0 0x1800f4
3398#define TSEM_REG_TSEM_INT_STS_1 0x180104
3399/* [RW 32] Parity mask register #0 read/write */
3400#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120
3401#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130
3402/* [R 32] Parity register #0 read */
3403#define TSEM_REG_TSEM_PRTY_STS_0 0x180114
3404#define TSEM_REG_TSEM_PRTY_STS_1 0x180124
3405/* [R 5] Used to read the XX protection CAM occupancy counter. */
3406#define UCM_REG_CAM_OCCUP 0xe0170
3407/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
3408 disregarded; valid output is deasserted; all other signals are treated as
3409 usual; if 1 - normal activity. */
3410#define UCM_REG_CDU_AG_RD_IFEN 0xe0038
3411/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
3412 are disregarded; all other signals are treated as usual; if 1 - normal
3413 activity. */
3414#define UCM_REG_CDU_AG_WR_IFEN 0xe0034
3415/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
3416 disregarded; valid output is deasserted; all other signals are treated as
3417 usual; if 1 - normal activity. */
3418#define UCM_REG_CDU_SM_RD_IFEN 0xe0040
3419/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
3420 input is disregarded; all other signals are treated as usual; if 1 -
3421 normal activity. */
3422#define UCM_REG_CDU_SM_WR_IFEN 0xe003c
3423/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes
3424 the initial credit value; read returns the current value of the credit
3425 counter. Must be initialized to 1 at start-up. */
3426#define UCM_REG_CFC_INIT_CRD 0xe0204
3427/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
3428 weight 8 (the most prioritised); 1 stands for weight 1(least
3429 prioritised); 2 stands for weight 2; etc. */
3430#define UCM_REG_CP_WEIGHT 0xe00c4
3431/* [RW 1] Input csem Interface enable. If 0 - the valid input is
3432 disregarded; acknowledge output is deasserted; all other signals are
3433 treated as usual; if 1 - normal activity. */
3434#define UCM_REG_CSEM_IFEN 0xe0028
3435/* [RC 1] Set when the message length mismatch (relative to last indication)
3436 at the csem interface is detected. */
3437#define UCM_REG_CSEM_LENGTH_MIS 0xe0160
3438/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
3439 weight 8 (the most prioritised); 1 stands for weight 1(least
3440 prioritised); 2 stands for weight 2; etc. */
3441#define UCM_REG_CSEM_WEIGHT 0xe00b8
3442/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
3443 disregarded; acknowledge output is deasserted; all other signals are
3444 treated as usual; if 1 - normal activity. */
3445#define UCM_REG_DORQ_IFEN 0xe0030
3446/* [RC 1] Set when the message length mismatch (relative to last indication)
3447 at the dorq interface is detected. */
3448#define UCM_REG_DORQ_LENGTH_MIS 0xe0168
3449/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
3450 weight 8 (the most prioritised); 1 stands for weight 1(least
3451 prioritised); 2 stands for weight 2; etc. */
3452#define UCM_REG_DORQ_WEIGHT 0xe00c0
3453/* [RW 8] The Event ID in case ErrorFlg input message bit is set. */
3454#define UCM_REG_ERR_EVNT_ID 0xe00a4
3455/* [RW 28] The CM erroneous header for QM and Timers formatting. */
3456#define UCM_REG_ERR_UCM_HDR 0xe00a0
3457/* [RW 8] The Event ID for Timers expiration. */
3458#define UCM_REG_EXPR_EVNT_ID 0xe00a8
3459/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write
3460 writes the initial credit value; read returns the current value of the
3461 credit counter. Must be initialized to 64 at start-up. */
3462#define UCM_REG_FIC0_INIT_CRD 0xe020c
3463/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write
3464 writes the initial credit value; read returns the current value of the
3465 credit counter. Must be initialized to 64 at start-up. */
3466#define UCM_REG_FIC1_INIT_CRD 0xe0210
3467/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
3468 - strict priority defined by ~ucm_registers_gr_ag_pr.gr_ag_pr;
3469 ~ucm_registers_gr_ld0_pr.gr_ld0_pr and
3470 ~ucm_registers_gr_ld1_pr.gr_ld1_pr. */
3471#define UCM_REG_GR_ARB_TYPE 0xe0144
3472/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
3473 highest priority is 3. It is supposed that the Store channel group is
3474 complement to the others. */
3475#define UCM_REG_GR_LD0_PR 0xe014c
3476/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
3477 highest priority is 3. It is supposed that the Store channel group is
3478 complement to the others. */
3479#define UCM_REG_GR_LD1_PR 0xe0150
3480/* [RW 2] The queue index for invalidate counter flag decision. */
3481#define UCM_REG_INV_CFLG_Q 0xe00e4
3482/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
3483 sent to STORM; for a specific connection type. the double REG-pairs are
3484 used in order to align to STORM context row size of 128 bits. The offset
3485 of these data in the STORM context is always 0. Index _i stands for the
3486 connection type (one of 16). */
3487#define UCM_REG_N_SM_CTX_LD_0 0xe0054
3488#define UCM_REG_N_SM_CTX_LD_1 0xe0058
3489#define UCM_REG_N_SM_CTX_LD_2 0xe005c
3490#define UCM_REG_N_SM_CTX_LD_3 0xe0060
3491#define UCM_REG_N_SM_CTX_LD_4 0xe0064
3492#define UCM_REG_N_SM_CTX_LD_5 0xe0068
3493#define UCM_REG_PHYS_QNUM0_0 0xe0110
3494#define UCM_REG_PHYS_QNUM0_1 0xe0114
3495#define UCM_REG_PHYS_QNUM1_0 0xe0118
3496#define UCM_REG_PHYS_QNUM1_1 0xe011c
3497#define UCM_REG_PHYS_QNUM2_0 0xe0120
3498#define UCM_REG_PHYS_QNUM2_1 0xe0124
3499#define UCM_REG_PHYS_QNUM3_0 0xe0128
3500#define UCM_REG_PHYS_QNUM3_1 0xe012c
3501/* [RW 8] The Event ID for Timers formatting in case of stop done. */
3502#define UCM_REG_STOP_EVNT_ID 0xe00ac
3503/* [RC 1] Set when the message length mismatch (relative to last indication)
3504 at the STORM interface is detected. */
3505#define UCM_REG_STORM_LENGTH_MIS 0xe0154
3506/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
3507 disregarded; acknowledge output is deasserted; all other signals are
3508 treated as usual; if 1 - normal activity. */
3509#define UCM_REG_STORM_UCM_IFEN 0xe0010
3510/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
3511 weight 8 (the most prioritised); 1 stands for weight 1(least
3512 prioritised); 2 stands for weight 2; etc. */
3513#define UCM_REG_STORM_WEIGHT 0xe00b0
3514/* [RW 4] Timers output initial credit. Max credit available - 15.Write
3515 writes the initial credit value; read returns the current value of the
3516 credit counter. Must be initialized to 4 at start-up. */
3517#define UCM_REG_TM_INIT_CRD 0xe021c
3518/* [RW 28] The CM header for Timers expiration command. */
3519#define UCM_REG_TM_UCM_HDR 0xe009c
3520/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
3521 disregarded; acknowledge output is deasserted; all other signals are
3522 treated as usual; if 1 - normal activity. */
3523#define UCM_REG_TM_UCM_IFEN 0xe001c
3524/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
3525 weight 8 (the most prioritised); 1 stands for weight 1(least
3526 prioritised); 2 stands for weight 2; etc. */
3527#define UCM_REG_TM_WEIGHT 0xe00d4
3528/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
3529 disregarded; acknowledge output is deasserted; all other signals are
3530 treated as usual; if 1 - normal activity. */
3531#define UCM_REG_TSEM_IFEN 0xe0024
3532/* [RC 1] Set when the message length mismatch (relative to last indication)
3533 at the tsem interface is detected. */
3534#define UCM_REG_TSEM_LENGTH_MIS 0xe015c
3535/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
3536 weight 8 (the most prioritised); 1 stands for weight 1(least
3537 prioritised); 2 stands for weight 2; etc. */
3538#define UCM_REG_TSEM_WEIGHT 0xe00b4
3539/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
3540 acknowledge output is deasserted; all other signals are treated as usual;
3541 if 1 - normal activity. */
3542#define UCM_REG_UCM_CFC_IFEN 0xe0044
3543/* [RW 11] Interrupt mask register #0 read/write */
3544#define UCM_REG_UCM_INT_MASK 0xe01d4
3545/* [R 11] Interrupt register #0 read */
3546#define UCM_REG_UCM_INT_STS 0xe01c8
3547/* [R 27] Parity register #0 read */
3548#define UCM_REG_UCM_PRTY_STS 0xe01d8
3549/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
3550 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
3551 Is used to determine the number of the AG context REG-pairs written back;
3552 when the Reg1WbFlg isn't set. */
3553#define UCM_REG_UCM_REG0_SZ 0xe00dc
3554/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
3555 disregarded; valid is deasserted; all other signals are treated as usual;
3556 if 1 - normal activity. */
3557#define UCM_REG_UCM_STORM0_IFEN 0xe0004
3558/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
3559 disregarded; valid is deasserted; all other signals are treated as usual;
3560 if 1 - normal activity. */
3561#define UCM_REG_UCM_STORM1_IFEN 0xe0008
3562/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
3563 disregarded; acknowledge output is deasserted; all other signals are
3564 treated as usual; if 1 - normal activity. */
3565#define UCM_REG_UCM_TM_IFEN 0xe0020
3566/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
3567 disregarded; valid is deasserted; all other signals are treated as usual;
3568 if 1 - normal activity. */
3569#define UCM_REG_UCM_UQM_IFEN 0xe000c
3570/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */
3571#define UCM_REG_UCM_UQM_USE_Q 0xe00d8
3572/* [RW 6] QM output initial credit. Max credit available - 32.Write writes
3573 the initial credit value; read returns the current value of the credit
3574 counter. Must be initialized to 32 at start-up. */
3575#define UCM_REG_UQM_INIT_CRD 0xe0220
3576/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
3577 stands for weight 8 (the most prioritised); 1 stands for weight 1(least
3578 prioritised); 2 stands for weight 2; etc. */
3579#define UCM_REG_UQM_P_WEIGHT 0xe00cc
3580/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
3581 stands for weight 8 (the most prioritised); 1 stands for weight 1(least
3582 prioritised); 2 stands for weight 2; etc. */
3583#define UCM_REG_UQM_S_WEIGHT 0xe00d0
3584/* [RW 28] The CM header value for QM request (primary). */
3585#define UCM_REG_UQM_UCM_HDR_P 0xe0094
3586/* [RW 28] The CM header value for QM request (secondary). */
3587#define UCM_REG_UQM_UCM_HDR_S 0xe0098
3588/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
3589 acknowledge output is deasserted; all other signals are treated as usual;
3590 if 1 - normal activity. */
3591#define UCM_REG_UQM_UCM_IFEN 0xe0014
3592/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
3593 acknowledge output is deasserted; all other signals are treated as usual;
3594 if 1 - normal activity. */
3595#define UCM_REG_USDM_IFEN 0xe0018
3596/* [RC 1] Set when the message length mismatch (relative to last indication)
3597 at the SDM interface is detected. */
3598#define UCM_REG_USDM_LENGTH_MIS 0xe0158
3599/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
3600 weight 8 (the most prioritised); 1 stands for weight 1(least
3601 prioritised); 2 stands for weight 2; etc. */
3602#define UCM_REG_USDM_WEIGHT 0xe00c8
3603/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
3604 disregarded; acknowledge output is deasserted; all other signals are
3605 treated as usual; if 1 - normal activity. */
3606#define UCM_REG_XSEM_IFEN 0xe002c
3607/* [RC 1] Set when the message length mismatch (relative to last indication)
3608 at the xsem interface is detected. */
3609#define UCM_REG_XSEM_LENGTH_MIS 0xe0164
3610/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
3611 weight 8 (the most prioritised); 1 stands for weight 1(least
3612 prioritised); 2 stands for weight 2; etc. */
3613#define UCM_REG_XSEM_WEIGHT 0xe00bc
3614/* [RW 20] Indirect access to the descriptor table of the XX protection
3615 mechanism. The fields are: [5:0] - message length; [14:6] - message
3616 pointer; [19:15] - next pointer. */
3617#define UCM_REG_XX_DESCR_TABLE 0xe0280
3618#define UCM_REG_XX_DESCR_TABLE_SIZE 32
3619/* [R 6] Use to read the XX protection Free counter. */
3620#define UCM_REG_XX_FREE 0xe016c
3621/* [RW 6] Initial value for the credit counter; responsible for fulfilling
3622 of the Input Stage XX protection buffer by the XX protection pending
3623 messages. Write writes the initial credit value; read returns the current
3624 value of the credit counter. Must be initialized to 12 at start-up. */
3625#define UCM_REG_XX_INIT_CRD 0xe0224
3626/* [RW 6] The maximum number of pending messages; which may be stored in XX
3627 protection. ~ucm_registers_xx_free.xx_free read on read. */
3628#define UCM_REG_XX_MSG_NUM 0xe0228
3629/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
3630#define UCM_REG_XX_OVFL_EVNT_ID 0xe004c
3631/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
3632 The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
3633 header pointer. */
3634#define UCM_REG_XX_TABLE 0xe0300
3635/* [RW 8] The event id for aggregated interrupt 0 */
3636#define USDM_REG_AGG_INT_EVENT_0 0xc4038
3637#define USDM_REG_AGG_INT_EVENT_1 0xc403c
3638#define USDM_REG_AGG_INT_EVENT_2 0xc4040
3639#define USDM_REG_AGG_INT_EVENT_4 0xc4048
3640#define USDM_REG_AGG_INT_EVENT_5 0xc404c
3641#define USDM_REG_AGG_INT_EVENT_6 0xc4050
3642/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
3643 or auto-mask-mode (1) */
3644#define USDM_REG_AGG_INT_MODE_0 0xc41b8
3645#define USDM_REG_AGG_INT_MODE_1 0xc41bc
3646#define USDM_REG_AGG_INT_MODE_4 0xc41c8
3647#define USDM_REG_AGG_INT_MODE_5 0xc41cc
3648#define USDM_REG_AGG_INT_MODE_6 0xc41d0
3649/* [RW 1] The T bit for aggregated interrupt 5 */
3650#define USDM_REG_AGG_INT_T_5 0xc40cc
3651#define USDM_REG_AGG_INT_T_6 0xc40d0
3652/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
3653#define USDM_REG_CFC_RSP_START_ADDR 0xc4008
3654/* [RW 16] The maximum value of the completion counter #0 */
3655#define USDM_REG_CMP_COUNTER_MAX0 0xc401c
3656/* [RW 16] The maximum value of the completion counter #1 */
3657#define USDM_REG_CMP_COUNTER_MAX1 0xc4020
3658/* [RW 16] The maximum value of the completion counter #2 */
3659#define USDM_REG_CMP_COUNTER_MAX2 0xc4024
3660/* [RW 16] The maximum value of the completion counter #3 */
3661#define USDM_REG_CMP_COUNTER_MAX3 0xc4028
3662/* [RW 13] The start address in the internal RAM for the completion
3663 counters. */
3664#define USDM_REG_CMP_COUNTER_START_ADDR 0xc400c
3665#define USDM_REG_ENABLE_IN1 0xc4238
3666#define USDM_REG_ENABLE_IN2 0xc423c
3667#define USDM_REG_ENABLE_OUT1 0xc4240
3668#define USDM_REG_ENABLE_OUT2 0xc4244
3669/* [RW 4] The initial number of messages that can be sent to the pxp control
3670 interface without receiving any ACK. */
3671#define USDM_REG_INIT_CREDIT_PXP_CTRL 0xc44c0
3672/* [ST 32] The number of ACK after placement messages received */
3673#define USDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc4280
3674/* [ST 32] The number of packet end messages received from the parser */
3675#define USDM_REG_NUM_OF_PKT_END_MSG 0xc4278
3676/* [ST 32] The number of requests received from the pxp async if */
3677#define USDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc427c
3678/* [ST 32] The number of commands received in queue 0 */
3679#define USDM_REG_NUM_OF_Q0_CMD 0xc4248
3680/* [ST 32] The number of commands received in queue 10 */
3681#define USDM_REG_NUM_OF_Q10_CMD 0xc4270
3682/* [ST 32] The number of commands received in queue 11 */
3683#define USDM_REG_NUM_OF_Q11_CMD 0xc4274
3684/* [ST 32] The number of commands received in queue 1 */
3685#define USDM_REG_NUM_OF_Q1_CMD 0xc424c
3686/* [ST 32] The number of commands received in queue 2 */
3687#define USDM_REG_NUM_OF_Q2_CMD 0xc4250
3688/* [ST 32] The number of commands received in queue 3 */
3689#define USDM_REG_NUM_OF_Q3_CMD 0xc4254
3690/* [ST 32] The number of commands received in queue 4 */
3691#define USDM_REG_NUM_OF_Q4_CMD 0xc4258
3692/* [ST 32] The number of commands received in queue 5 */
3693#define USDM_REG_NUM_OF_Q5_CMD 0xc425c
3694/* [ST 32] The number of commands received in queue 6 */
3695#define USDM_REG_NUM_OF_Q6_CMD 0xc4260
3696/* [ST 32] The number of commands received in queue 7 */
3697#define USDM_REG_NUM_OF_Q7_CMD 0xc4264
3698/* [ST 32] The number of commands received in queue 8 */
3699#define USDM_REG_NUM_OF_Q8_CMD 0xc4268
3700/* [ST 32] The number of commands received in queue 9 */
3701#define USDM_REG_NUM_OF_Q9_CMD 0xc426c
3702/* [RW 13] The start address in the internal RAM for the packet end message */
3703#define USDM_REG_PCK_END_MSG_START_ADDR 0xc4014
3704/* [RW 13] The start address in the internal RAM for queue counters */
3705#define USDM_REG_Q_COUNTER_START_ADDR 0xc4010
3706/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
3707#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc4550
3708/* [R 1] parser fifo empty in sdm_sync block */
3709#define USDM_REG_SYNC_PARSER_EMPTY 0xc4558
3710/* [R 1] parser serial fifo empty in sdm_sync block */
3711#define USDM_REG_SYNC_SYNC_EMPTY 0xc4560
3712/* [RW 32] Tick for timer counter. Applicable only when
3713 ~usdm_registers_timer_tick_enable.timer_tick_enable =1 */
3714#define USDM_REG_TIMER_TICK 0xc4000
3715/* [RW 32] Interrupt mask register #0 read/write */
3716#define USDM_REG_USDM_INT_MASK_0 0xc42a0
3717#define USDM_REG_USDM_INT_MASK_1 0xc42b0
3718/* [R 32] Interrupt register #0 read */
3719#define USDM_REG_USDM_INT_STS_0 0xc4294
3720#define USDM_REG_USDM_INT_STS_1 0xc42a4
3721/* [RW 11] Parity mask register #0 read/write */
3722#define USDM_REG_USDM_PRTY_MASK 0xc42c0
3723/* [R 11] Parity register #0 read */
3724#define USDM_REG_USDM_PRTY_STS 0xc42b4
3725/* [RW 5] The number of time_slots in the arbitration cycle */
3726#define USEM_REG_ARB_CYCLE_SIZE 0x300034
3727/* [RW 3] The source that is associated with arbitration element 0. Source
3728 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3729 sleeping thread with priority 1; 4- sleeping thread with priority 2 */
3730#define USEM_REG_ARB_ELEMENT0 0x300020
3731/* [RW 3] The source that is associated with arbitration element 1. Source
3732 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3733 sleeping thread with priority 1; 4- sleeping thread with priority 2.
3734 Could not be equal to register ~usem_registers_arb_element0.arb_element0 */
3735#define USEM_REG_ARB_ELEMENT1 0x300024
3736/* [RW 3] The source that is associated with arbitration element 2. Source
3737 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3738 sleeping thread with priority 1; 4- sleeping thread with priority 2.
3739 Could not be equal to register ~usem_registers_arb_element0.arb_element0
3740 and ~usem_registers_arb_element1.arb_element1 */
3741#define USEM_REG_ARB_ELEMENT2 0x300028
3742/* [RW 3] The source that is associated with arbitration element 3. Source
3743 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3744 sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
3745 not be equal to register ~usem_registers_arb_element0.arb_element0 and
3746 ~usem_registers_arb_element1.arb_element1 and
3747 ~usem_registers_arb_element2.arb_element2 */
3748#define USEM_REG_ARB_ELEMENT3 0x30002c
3749/* [RW 3] The source that is associated with arbitration element 4. Source
3750 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3751 sleeping thread with priority 1; 4- sleeping thread with priority 2.
3752 Could not be equal to register ~usem_registers_arb_element0.arb_element0
3753 and ~usem_registers_arb_element1.arb_element1 and
3754 ~usem_registers_arb_element2.arb_element2 and
3755 ~usem_registers_arb_element3.arb_element3 */
3756#define USEM_REG_ARB_ELEMENT4 0x300030
3757#define USEM_REG_ENABLE_IN 0x3000a4
3758#define USEM_REG_ENABLE_OUT 0x3000a8
3759/* [RW 32] This address space contains all registers and memories that are
3760 placed in SEM_FAST block. The SEM_FAST registers are described in
3761 appendix B. In order to access the sem_fast registers the base address
3762 ~fast_memory.fast_memory should be added to each sem_fast register offset. */
3763#define USEM_REG_FAST_MEMORY 0x320000
3764/* [RW 1] Disables input messages from FIC0 May be updated during run_time
3765 by the microcode */
3766#define USEM_REG_FIC0_DISABLE 0x300224
3767/* [RW 1] Disables input messages from FIC1 May be updated during run_time
3768 by the microcode */
3769#define USEM_REG_FIC1_DISABLE 0x300234
3770/* [RW 15] Interrupt table Read and write access to it is not possible in
3771 the middle of the work */
3772#define USEM_REG_INT_TABLE 0x300400
3773/* [ST 24] Statistics register. The number of messages that entered through
3774 FIC0 */
3775#define USEM_REG_MSG_NUM_FIC0 0x300000
3776/* [ST 24] Statistics register. The number of messages that entered through
3777 FIC1 */
3778#define USEM_REG_MSG_NUM_FIC1 0x300004
3779/* [ST 24] Statistics register. The number of messages that were sent to
3780 FOC0 */
3781#define USEM_REG_MSG_NUM_FOC0 0x300008
3782/* [ST 24] Statistics register. The number of messages that were sent to
3783 FOC1 */
3784#define USEM_REG_MSG_NUM_FOC1 0x30000c
3785/* [ST 24] Statistics register. The number of messages that were sent to
3786 FOC2 */
3787#define USEM_REG_MSG_NUM_FOC2 0x300010
3788/* [ST 24] Statistics register. The number of messages that were sent to
3789 FOC3 */
3790#define USEM_REG_MSG_NUM_FOC3 0x300014
3791/* [RW 1] Disables input messages from the passive buffer May be updated
3792 during run_time by the microcode */
3793#define USEM_REG_PAS_DISABLE 0x30024c
3794/* [WB 128] Debug only. Passive buffer memory */
3795#define USEM_REG_PASSIVE_BUFFER 0x302000
3796/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
3797#define USEM_REG_PRAM 0x340000
3798/* [R 16] Valid sleeping threads indication have bit per thread */
3799#define USEM_REG_SLEEP_THREADS_VALID 0x30026c
3800/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
3801#define USEM_REG_SLOW_EXT_STORE_EMPTY 0x3002a0
3802/* [RW 16] List of free threads . There is a bit per thread. */
3803#define USEM_REG_THREADS_LIST 0x3002e4
3804/* [RW 3] The arbitration scheme of time_slot 0 */
3805#define USEM_REG_TS_0_AS 0x300038
3806/* [RW 3] The arbitration scheme of time_slot 10 */
3807#define USEM_REG_TS_10_AS 0x300060
3808/* [RW 3] The arbitration scheme of time_slot 11 */
3809#define USEM_REG_TS_11_AS 0x300064
3810/* [RW 3] The arbitration scheme of time_slot 12 */
3811#define USEM_REG_TS_12_AS 0x300068
3812/* [RW 3] The arbitration scheme of time_slot 13 */
3813#define USEM_REG_TS_13_AS 0x30006c
3814/* [RW 3] The arbitration scheme of time_slot 14 */
3815#define USEM_REG_TS_14_AS 0x300070
3816/* [RW 3] The arbitration scheme of time_slot 15 */
3817#define USEM_REG_TS_15_AS 0x300074
3818/* [RW 3] The arbitration scheme of time_slot 16 */
3819#define USEM_REG_TS_16_AS 0x300078
3820/* [RW 3] The arbitration scheme of time_slot 17 */
3821#define USEM_REG_TS_17_AS 0x30007c
3822/* [RW 3] The arbitration scheme of time_slot 18 */
3823#define USEM_REG_TS_18_AS 0x300080
3824/* [RW 3] The arbitration scheme of time_slot 1 */
3825#define USEM_REG_TS_1_AS 0x30003c
3826/* [RW 3] The arbitration scheme of time_slot 2 */
3827#define USEM_REG_TS_2_AS 0x300040
3828/* [RW 3] The arbitration scheme of time_slot 3 */
3829#define USEM_REG_TS_3_AS 0x300044
3830/* [RW 3] The arbitration scheme of time_slot 4 */
3831#define USEM_REG_TS_4_AS 0x300048
3832/* [RW 3] The arbitration scheme of time_slot 5 */
3833#define USEM_REG_TS_5_AS 0x30004c
3834/* [RW 3] The arbitration scheme of time_slot 6 */
3835#define USEM_REG_TS_6_AS 0x300050
3836/* [RW 3] The arbitration scheme of time_slot 7 */
3837#define USEM_REG_TS_7_AS 0x300054
3838/* [RW 3] The arbitration scheme of time_slot 8 */
3839#define USEM_REG_TS_8_AS 0x300058
3840/* [RW 3] The arbitration scheme of time_slot 9 */
3841#define USEM_REG_TS_9_AS 0x30005c
3842/* [RW 32] Interrupt mask register #0 read/write */
3843#define USEM_REG_USEM_INT_MASK_0 0x300110
3844#define USEM_REG_USEM_INT_MASK_1 0x300120
3845/* [R 32] Interrupt register #0 read */
3846#define USEM_REG_USEM_INT_STS_0 0x300104
3847#define USEM_REG_USEM_INT_STS_1 0x300114
3848/* [RW 32] Parity mask register #0 read/write */
3849#define USEM_REG_USEM_PRTY_MASK_0 0x300130
3850#define USEM_REG_USEM_PRTY_MASK_1 0x300140
3851/* [R 32] Parity register #0 read */
3852#define USEM_REG_USEM_PRTY_STS_0 0x300124
3853#define USEM_REG_USEM_PRTY_STS_1 0x300134
3854/* [RW 2] The queue index for registration on Aux1 counter flag. */
3855#define XCM_REG_AUX1_Q 0x20134
3856/* [RW 2] Per each decision rule the queue index to register to. */
3857#define XCM_REG_AUX_CNT_FLG_Q_19 0x201b0
3858/* [R 5] Used to read the XX protection CAM occupancy counter. */
3859#define XCM_REG_CAM_OCCUP 0x20244
3860/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
3861 disregarded; valid output is deasserted; all other signals are treated as
3862 usual; if 1 - normal activity. */
3863#define XCM_REG_CDU_AG_RD_IFEN 0x20044
3864/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
3865 are disregarded; all other signals are treated as usual; if 1 - normal
3866 activity. */
3867#define XCM_REG_CDU_AG_WR_IFEN 0x20040
3868/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
3869 disregarded; valid output is deasserted; all other signals are treated as
3870 usual; if 1 - normal activity. */
3871#define XCM_REG_CDU_SM_RD_IFEN 0x2004c
3872/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
3873 input is disregarded; all other signals are treated as usual; if 1 -
3874 normal activity. */
3875#define XCM_REG_CDU_SM_WR_IFEN 0x20048
3876/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes
3877 the initial credit value; read returns the current value of the credit
3878 counter. Must be initialized to 1 at start-up. */
3879#define XCM_REG_CFC_INIT_CRD 0x20404
3880/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
3881 weight 8 (the most prioritised); 1 stands for weight 1(least
3882 prioritised); 2 stands for weight 2; etc. */
3883#define XCM_REG_CP_WEIGHT 0x200dc
3884/* [RW 1] Input csem Interface enable. If 0 - the valid input is
3885 disregarded; acknowledge output is deasserted; all other signals are
3886 treated as usual; if 1 - normal activity. */
3887#define XCM_REG_CSEM_IFEN 0x20028
3888/* [RC 1] Set at message length mismatch (relative to last indication) at
3889 the csem interface. */
3890#define XCM_REG_CSEM_LENGTH_MIS 0x20228
3891/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
3892 weight 8 (the most prioritised); 1 stands for weight 1(least
3893 prioritised); 2 stands for weight 2; etc. */
3894#define XCM_REG_CSEM_WEIGHT 0x200c4
3895/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
3896 disregarded; acknowledge output is deasserted; all other signals are
3897 treated as usual; if 1 - normal activity. */
3898#define XCM_REG_DORQ_IFEN 0x20030
3899/* [RC 1] Set at message length mismatch (relative to last indication) at
3900 the dorq interface. */
3901#define XCM_REG_DORQ_LENGTH_MIS 0x20230
3902/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
3903 weight 8 (the most prioritised); 1 stands for weight 1(least
3904 prioritised); 2 stands for weight 2; etc. */
3905#define XCM_REG_DORQ_WEIGHT 0x200cc
3906/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
3907#define XCM_REG_ERR_EVNT_ID 0x200b0
3908/* [RW 28] The CM erroneous header for QM and Timers formatting. */
3909#define XCM_REG_ERR_XCM_HDR 0x200ac
3910/* [RW 8] The Event ID for Timers expiration. */
3911#define XCM_REG_EXPR_EVNT_ID 0x200b4
3912/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write
3913 writes the initial credit value; read returns the current value of the
3914 credit counter. Must be initialized to 64 at start-up. */
3915#define XCM_REG_FIC0_INIT_CRD 0x2040c
3916/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write
3917 writes the initial credit value; read returns the current value of the
3918 credit counter. Must be initialized to 64 at start-up. */
3919#define XCM_REG_FIC1_INIT_CRD 0x20410
3920#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0 0x20118
3921#define XCM_REG_GLB_DEL_ACK_MAX_CNT_1 0x2011c
3922#define XCM_REG_GLB_DEL_ACK_TMR_VAL_0 0x20108
3923#define XCM_REG_GLB_DEL_ACK_TMR_VAL_1 0x2010c
3924/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
3925 - strict priority defined by ~xcm_registers_gr_ag_pr.gr_ag_pr;
3926 ~xcm_registers_gr_ld0_pr.gr_ld0_pr and
3927 ~xcm_registers_gr_ld1_pr.gr_ld1_pr. */
3928#define XCM_REG_GR_ARB_TYPE 0x2020c
3929/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
3930 highest priority is 3. It is supposed that the Channel group is the
3931 complement of the other 3 groups. */
3932#define XCM_REG_GR_LD0_PR 0x20214
3933/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
3934 highest priority is 3. It is supposed that the Channel group is the
3935 complement of the other 3 groups. */
3936#define XCM_REG_GR_LD1_PR 0x20218
3937/* [RW 1] Input nig0 Interface enable. If 0 - the valid input is
3938 disregarded; acknowledge output is deasserted; all other signals are
3939 treated as usual; if 1 - normal activity. */
3940#define XCM_REG_NIG0_IFEN 0x20038
3941/* [RC 1] Set at message length mismatch (relative to last indication) at
3942 the nig0 interface. */
3943#define XCM_REG_NIG0_LENGTH_MIS 0x20238
3944/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for
3945 weight 8 (the most prioritised); 1 stands for weight 1 (least
3946 prioritised); 2 stands for weight 2; etc. */
3947#define XCM_REG_NIG0_WEIGHT 0x200d4
3948/* [RW 1] Input nig1 Interface enable. If 0 - the valid input is
3949 disregarded; acknowledge output is deasserted; all other signals are
3950 treated as usual; if 1 - normal activity. */
3951#define XCM_REG_NIG1_IFEN 0x2003c
3952/* [RC 1] Set at message length mismatch (relative to last indication) at
3953 the nig1 interface. */
3954#define XCM_REG_NIG1_LENGTH_MIS 0x2023c
3955/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
3956 sent to STORM; for a specific connection type. The double REG-pairs are
3957 used in order to align to STORM context row size of 128 bits. The offset
3958 of these data in the STORM context is always 0. Index _i stands for the
3959 connection type (one of 16). */
3960#define XCM_REG_N_SM_CTX_LD_0 0x20060
3961#define XCM_REG_N_SM_CTX_LD_1 0x20064
3962#define XCM_REG_N_SM_CTX_LD_2 0x20068
3963#define XCM_REG_N_SM_CTX_LD_3 0x2006c
3964#define XCM_REG_N_SM_CTX_LD_4 0x20070
3965#define XCM_REG_N_SM_CTX_LD_5 0x20074
3966/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
3967 acknowledge output is deasserted; all other signals are treated as usual;
3968 if 1 - normal activity. */
3969#define XCM_REG_PBF_IFEN 0x20034
3970/* [RC 1] Set at message length mismatch (relative to last indication) at
3971 the pbf interface. */
3972#define XCM_REG_PBF_LENGTH_MIS 0x20234
3973/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
3974 weight 8 (the most prioritised); 1 stands for weight 1 (least
3975 prioritised); 2 stands for weight 2; etc. */
3976#define XCM_REG_PBF_WEIGHT 0x200d0
3977#define XCM_REG_PHYS_QNUM3_0 0x20100
3978#define XCM_REG_PHYS_QNUM3_1 0x20104
3979/* [RW 8] The Event ID for Timers formatting in case of stop done. */
3980#define XCM_REG_STOP_EVNT_ID 0x200b8
3981/* [RC 1] Set at message length mismatch (relative to last indication) at
3982 the STORM interface. */
3983#define XCM_REG_STORM_LENGTH_MIS 0x2021c
3984/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
3985 weight 8 (the most prioritised); 1 stands for weight 1 (least
3986 prioritised); 2 stands for weight 2; etc. */
3987#define XCM_REG_STORM_WEIGHT 0x200bc
3988/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
3989 disregarded; acknowledge output is deasserted; all other signals are
3990 treated as usual; if 1 - normal activity. */
3991#define XCM_REG_STORM_XCM_IFEN 0x20010
3992/* [RW 4] Timers output initial credit. Max credit available - 15. Write
3993 writes the initial credit value; read returns the current value of the
3994 credit counter. Must be initialized to 4 at start-up. */
3995#define XCM_REG_TM_INIT_CRD 0x2041c
3996/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
3997 weight 8 (the most prioritised); 1 stands for weight 1 (least
3998 prioritised); 2 stands for weight 2; etc. */
3999#define XCM_REG_TM_WEIGHT 0x200ec
4000/* [RW 28] The CM header for Timers expiration command. */
4001#define XCM_REG_TM_XCM_HDR 0x200a8
4002/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
4003 disregarded; acknowledge output is deasserted; all other signals are
4004 treated as usual; if 1 - normal activity. */
4005#define XCM_REG_TM_XCM_IFEN 0x2001c
4006/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
4007 disregarded; acknowledge output is deasserted; all other signals are
4008 treated as usual; if 1 - normal activity. */
4009#define XCM_REG_TSEM_IFEN 0x20024
4010/* [RC 1] Set at message length mismatch (relative to last indication) at
4011 the tsem interface. */
4012#define XCM_REG_TSEM_LENGTH_MIS 0x20224
4013/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
4014 weight 8 (the most prioritised); 1 stands for weight 1 (least
4015 prioritised); 2 stands for weight 2; etc. */
4016#define XCM_REG_TSEM_WEIGHT 0x200c0
4017/* [RW 2] The queue index for registration on UNA greater NXT decision rule. */
4018#define XCM_REG_UNA_GT_NXT_Q 0x20120
4019/* [RW 1] Input usem Interface enable. If 0 - the valid input is
4020 disregarded; acknowledge output is deasserted; all other signals are
4021 treated as usual; if 1 - normal activity. */
4022#define XCM_REG_USEM_IFEN 0x2002c
4023/* [RC 1] Message length mismatch (relative to last indication) at the usem
4024 interface. */
4025#define XCM_REG_USEM_LENGTH_MIS 0x2022c
4026/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
4027 weight 8 (the most prioritised); 1 stands for weight 1 (least
4028 prioritised); 2 stands for weight 2; etc. */
4029#define XCM_REG_USEM_WEIGHT 0x200c8
4030#define XCM_REG_WU_DA_CNT_CMD00 0x201d4
4031#define XCM_REG_WU_DA_CNT_CMD01 0x201d8
4032#define XCM_REG_WU_DA_CNT_CMD10 0x201dc
4033#define XCM_REG_WU_DA_CNT_CMD11 0x201e0
4034#define XCM_REG_WU_DA_CNT_UPD_VAL00 0x201e4
4035#define XCM_REG_WU_DA_CNT_UPD_VAL01 0x201e8
4036#define XCM_REG_WU_DA_CNT_UPD_VAL10 0x201ec
4037#define XCM_REG_WU_DA_CNT_UPD_VAL11 0x201f0
4038#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00 0x201c4
4039#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01 0x201c8
4040#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10 0x201cc
4041#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11 0x201d0
4042/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
4043 acknowledge output is deasserted; all other signals are treated as usual;
4044 if 1 - normal activity. */
4045#define XCM_REG_XCM_CFC_IFEN 0x20050
4046/* [RW 14] Interrupt mask register #0 read/write */
4047#define XCM_REG_XCM_INT_MASK 0x202b4
4048/* [R 14] Interrupt register #0 read */
4049#define XCM_REG_XCM_INT_STS 0x202a8
4050/* [R 30] Parity register #0 read */
4051#define XCM_REG_XCM_PRTY_STS 0x202b8
4052/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS
4053 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
4054 Is used to determine the number of the AG context REG-pairs written back;
4055 when the Reg1WbFlg isn't set. */
4056#define XCM_REG_XCM_REG0_SZ 0x200f4
4057/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
4058 disregarded; valid is deasserted; all other signals are treated as usual;
4059 if 1 - normal activity. */
4060#define XCM_REG_XCM_STORM0_IFEN 0x20004
4061/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
4062 disregarded; valid is deasserted; all other signals are treated as usual;
4063 if 1 - normal activity. */
4064#define XCM_REG_XCM_STORM1_IFEN 0x20008
4065/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
4066 disregarded; acknowledge output is deasserted; all other signals are
4067 treated as usual; if 1 - normal activity. */
4068#define XCM_REG_XCM_TM_IFEN 0x20020
4069/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
4070 disregarded; valid is deasserted; all other signals are treated as usual;
4071 if 1 - normal activity. */
4072#define XCM_REG_XCM_XQM_IFEN 0x2000c
4073/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */
4074#define XCM_REG_XCM_XQM_USE_Q 0x200f0
4075/* [RW 4] The value by which CFC updates the activity counter at QM bypass. */
4076#define XCM_REG_XQM_BYP_ACT_UPD 0x200fc
4077/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
4078 the initial credit value; read returns the current value of the credit
4079 counter. Must be initialized to 32 at start-up. */
4080#define XCM_REG_XQM_INIT_CRD 0x20420
4081/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
4082 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
4083 prioritised); 2 stands for weight 2; etc. */
4084#define XCM_REG_XQM_P_WEIGHT 0x200e4
4085/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
4086 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
4087 prioritised); 2 stands for weight 2; etc. */
4088#define XCM_REG_XQM_S_WEIGHT 0x200e8
4089/* [RW 28] The CM header value for QM request (primary). */
4090#define XCM_REG_XQM_XCM_HDR_P 0x200a0
4091/* [RW 28] The CM header value for QM request (secondary). */
4092#define XCM_REG_XQM_XCM_HDR_S 0x200a4
4093/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
4094 acknowledge output is deasserted; all other signals are treated as usual;
4095 if 1 - normal activity. */
4096#define XCM_REG_XQM_XCM_IFEN 0x20014
4097/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
4098 acknowledge output is deasserted; all other signals are treated as usual;
4099 if 1 - normal activity. */
4100#define XCM_REG_XSDM_IFEN 0x20018
4101/* [RC 1] Set at message length mismatch (relative to last indication) at
4102 the SDM interface. */
4103#define XCM_REG_XSDM_LENGTH_MIS 0x20220
4104/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
4105 weight 8 (the most prioritised); 1 stands for weight 1 (least
4106 prioritised); 2 stands for weight 2; etc. */
4107#define XCM_REG_XSDM_WEIGHT 0x200e0
4108/* [RW 17] Indirect access to the descriptor table of the XX protection
4109 mechanism. The fields are: [5:0] - message length; [11:6] - message
4110 pointer; [16:12] - next pointer. */
4111#define XCM_REG_XX_DESCR_TABLE 0x20480
4112#define XCM_REG_XX_DESCR_TABLE_SIZE 32
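/* Editor's sketch (not part of the original header): extracting the XX
   descriptor fields documented above. Helper names are hypothetical; only the
   bit layout ([5:0] length; [11:6] message pointer; [16:12] next pointer)
   comes from the comment. */
static inline u32 xcm_xx_descr_msg_len(u32 descr)
{
	return descr & 0x3f;			/* bits [5:0] */
}
static inline u32 xcm_xx_descr_msg_ptr(u32 descr)
{
	return (descr >> 6) & 0x3f;		/* bits [11:6] */
}
static inline u32 xcm_xx_descr_next_ptr(u32 descr)
{
	return (descr >> 12) & 0x1f;		/* bits [16:12] */
}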
4113/* [R 6] Used to read the XX protection Free counter. */
4114#define XCM_REG_XX_FREE 0x20240
4115/* [RW 6] Initial value for the credit counter; responsible for fulfilling
4116 of the Input Stage XX protection buffer by the XX protection pending
4117 messages. Max credit available - 3. Write writes the initial credit value;
4118 read returns the current value of the credit counter. Must be initialized
4119 to 2 at start-up. */
4120#define XCM_REG_XX_INIT_CRD 0x20424
4121/* [RW 6] The maximum number of pending messages; which may be stored in XX
4122 protection. ~xcm_registers_xx_free.xx_free read on read. */
4123#define XCM_REG_XX_MSG_NUM 0x20428
4124/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
4125#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
4126/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
4127 The fields are: [4:0] - tail pointer; [9:5] - Link List size; [14:10] -
4128 header pointer. */
4129#define XCM_REG_XX_TABLE 0x20500
4130/* [RW 8] The event id for aggregated interrupt 0 */
4131#define XSDM_REG_AGG_INT_EVENT_0 0x166038
4132#define XSDM_REG_AGG_INT_EVENT_1 0x16603c
4133#define XSDM_REG_AGG_INT_EVENT_10 0x166060
4134#define XSDM_REG_AGG_INT_EVENT_11 0x166064
4135#define XSDM_REG_AGG_INT_EVENT_12 0x166068
4136#define XSDM_REG_AGG_INT_EVENT_13 0x16606c
4137#define XSDM_REG_AGG_INT_EVENT_14 0x166070
4138#define XSDM_REG_AGG_INT_EVENT_2 0x166040
4139#define XSDM_REG_AGG_INT_EVENT_3 0x166044
4140#define XSDM_REG_AGG_INT_EVENT_4 0x166048
4141#define XSDM_REG_AGG_INT_EVENT_5 0x16604c
4142#define XSDM_REG_AGG_INT_EVENT_6 0x166050
4143#define XSDM_REG_AGG_INT_EVENT_7 0x166054
4144#define XSDM_REG_AGG_INT_EVENT_8 0x166058
4145#define XSDM_REG_AGG_INT_EVENT_9 0x16605c
4146/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
4147 or auto-mask-mode (1) */
4148#define XSDM_REG_AGG_INT_MODE_0 0x1661b8
4149#define XSDM_REG_AGG_INT_MODE_1 0x1661bc
4150/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
4151#define XSDM_REG_CFC_RSP_START_ADDR 0x166008
4152/* [RW 16] The maximum value of the completion counter #0 */
4153#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c
4154/* [RW 16] The maximum value of the completion counter #1 */
4155#define XSDM_REG_CMP_COUNTER_MAX1 0x166020
4156/* [RW 16] The maximum value of the completion counter #2 */
4157#define XSDM_REG_CMP_COUNTER_MAX2 0x166024
4158/* [RW 16] The maximum value of the completion counter #3 */
4159#define XSDM_REG_CMP_COUNTER_MAX3 0x166028
4160/* [RW 13] The start address in the internal RAM for the completion
4161 counters. */
4162#define XSDM_REG_CMP_COUNTER_START_ADDR 0x16600c
4163#define XSDM_REG_ENABLE_IN1 0x166238
4164#define XSDM_REG_ENABLE_IN2 0x16623c
4165#define XSDM_REG_ENABLE_OUT1 0x166240
4166#define XSDM_REG_ENABLE_OUT2 0x166244
4167/* [RW 4] The initial number of messages that can be sent to the pxp control
4168 interface without receiving any ACK. */
4169#define XSDM_REG_INIT_CREDIT_PXP_CTRL 0x1664bc
4170/* [ST 32] The number of ACK after placement messages received */
4171#define XSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x16627c
4172/* [ST 32] The number of packet end messages received from the parser */
4173#define XSDM_REG_NUM_OF_PKT_END_MSG 0x166274
4174/* [ST 32] The number of requests received from the pxp async if */
4175#define XSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x166278
4176/* [ST 32] The number of commands received in queue 0 */
4177#define XSDM_REG_NUM_OF_Q0_CMD 0x166248
4178/* [ST 32] The number of commands received in queue 10 */
4179#define XSDM_REG_NUM_OF_Q10_CMD 0x16626c
4180/* [ST 32] The number of commands received in queue 11 */
4181#define XSDM_REG_NUM_OF_Q11_CMD 0x166270
4182/* [ST 32] The number of commands received in queue 1 */
4183#define XSDM_REG_NUM_OF_Q1_CMD 0x16624c
4184/* [ST 32] The number of commands received in queue 3 */
4185#define XSDM_REG_NUM_OF_Q3_CMD 0x166250
4186/* [ST 32] The number of commands received in queue 4 */
4187#define XSDM_REG_NUM_OF_Q4_CMD 0x166254
4188/* [ST 32] The number of commands received in queue 5 */
4189#define XSDM_REG_NUM_OF_Q5_CMD 0x166258
4190/* [ST 32] The number of commands received in queue 6 */
4191#define XSDM_REG_NUM_OF_Q6_CMD 0x16625c
4192/* [ST 32] The number of commands received in queue 7 */
4193#define XSDM_REG_NUM_OF_Q7_CMD 0x166260
4194/* [ST 32] The number of commands received in queue 8 */
4195#define XSDM_REG_NUM_OF_Q8_CMD 0x166264
4196/* [ST 32] The number of commands received in queue 9 */
4197#define XSDM_REG_NUM_OF_Q9_CMD 0x166268
4198/* [RW 13] The start address in the internal RAM for queue counters */
4199#define XSDM_REG_Q_COUNTER_START_ADDR 0x166010
4200/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
4201#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548
4202/* [R 1] parser fifo empty in sdm_sync block */
4203#define XSDM_REG_SYNC_PARSER_EMPTY 0x166550
4204/* [R 1] parser serial fifo empty in sdm_sync block */
4205#define XSDM_REG_SYNC_SYNC_EMPTY 0x166558
4206/* [RW 32] Tick for timer counter. Applicable only when
4207 ~xsdm_registers_timer_tick_enable.timer_tick_enable =1 */
4208#define XSDM_REG_TIMER_TICK 0x166000
4209/* [RW 32] Interrupt mask register #0 read/write */
4210#define XSDM_REG_XSDM_INT_MASK_0 0x16629c
4211#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac
4212/* [R 32] Interrupt register #0 read */
4213#define XSDM_REG_XSDM_INT_STS_0 0x166290
4214#define XSDM_REG_XSDM_INT_STS_1 0x1662a0
4215/* [RW 11] Parity mask register #0 read/write */
4216#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
4217/* [R 11] Parity register #0 read */
4218#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
4219/* [RW 5] The number of time_slots in the arbitration cycle */
4220#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
4221/* [RW 3] The source that is associated with arbitration element 0. Source
4222 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4223 sleeping thread with priority 1; 4- sleeping thread with priority 2 */
4224#define XSEM_REG_ARB_ELEMENT0 0x280020
4225/* [RW 3] The source that is associated with arbitration element 1. Source
4226 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4227 sleeping thread with priority 1; 4- sleeping thread with priority 2.
4228 Could not be equal to register ~xsem_registers_arb_element0.arb_element0 */
4229#define XSEM_REG_ARB_ELEMENT1 0x280024
4230/* [RW 3] The source that is associated with arbitration element 2. Source
4231 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4232 sleeping thread with priority 1; 4- sleeping thread with priority 2.
4233 Could not be equal to register ~xsem_registers_arb_element0.arb_element0
4234 and ~xsem_registers_arb_element1.arb_element1 */
4235#define XSEM_REG_ARB_ELEMENT2 0x280028
4236/* [RW 3] The source that is associated with arbitration element 3. Source
4237 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4238 sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
4239 not be equal to register ~xsem_registers_arb_element0.arb_element0 and
4240 ~xsem_registers_arb_element1.arb_element1 and
4241 ~xsem_registers_arb_element2.arb_element2 */
4242#define XSEM_REG_ARB_ELEMENT3 0x28002c
4243/* [RW 3] The source that is associated with arbitration element 4. Source
4244 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4245 sleeping thread with priority 1; 4- sleeping thread with priority 2.
4246 Could not be equal to register ~xsem_registers_arb_element0.arb_element0
4247 and ~xsem_registers_arb_element1.arb_element1 and
4248 ~xsem_registers_arb_element2.arb_element2 and
4249 ~xsem_registers_arb_element3.arb_element3 */
4250#define XSEM_REG_ARB_ELEMENT4 0x280030
4251#define XSEM_REG_ENABLE_IN 0x2800a4
4252#define XSEM_REG_ENABLE_OUT 0x2800a8
4253/* [RW 32] This address space contains all registers and memories that are
4254 placed in SEM_FAST block. The SEM_FAST registers are described in
4255 appendix B. In order to access the sem_fast registers the base address
4256 ~fast_memory.fast_memory should be added to each sem_fast register offset. */
4257#define XSEM_REG_FAST_MEMORY 0x2a0000
4258/* [RW 1] Disables input messages from FIC0. May be updated during run_time
4259 by the microcode */
4260#define XSEM_REG_FIC0_DISABLE 0x280224
4261/* [RW 1] Disables input messages from FIC1. May be updated during run_time
4262 by the microcode */
4263#define XSEM_REG_FIC1_DISABLE 0x280234
4264/* [RW 15] Interrupt table. Read and write access to it is not possible in
4265 the middle of the work */
4266#define XSEM_REG_INT_TABLE 0x280400
4267/* [ST 24] Statistics register. The number of messages that entered through
4268 FIC0 */
4269#define XSEM_REG_MSG_NUM_FIC0 0x280000
4270/* [ST 24] Statistics register. The number of messages that entered through
4271 FIC1 */
4272#define XSEM_REG_MSG_NUM_FIC1 0x280004
4273/* [ST 24] Statistics register. The number of messages that were sent to
4274 FOC0 */
4275#define XSEM_REG_MSG_NUM_FOC0 0x280008
4276/* [ST 24] Statistics register. The number of messages that were sent to
4277 FOC1 */
4278#define XSEM_REG_MSG_NUM_FOC1 0x28000c
4279/* [ST 24] Statistics register. The number of messages that were sent to
4280 FOC2 */
4281#define XSEM_REG_MSG_NUM_FOC2 0x280010
4282/* [ST 24] Statistics register. The number of messages that were sent to
4283 FOC3 */
4284#define XSEM_REG_MSG_NUM_FOC3 0x280014
4285/* [RW 1] Disables input messages from the passive buffer. May be updated
4286 during run_time by the microcode */
4287#define XSEM_REG_PAS_DISABLE 0x28024c
4288/* [WB 128] Debug only. Passive buffer memory */
4289#define XSEM_REG_PASSIVE_BUFFER 0x282000
4290/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
4291#define XSEM_REG_PRAM 0x2c0000
4292/* [R 16] Valid sleeping threads indication; there is a bit per thread */
4293#define XSEM_REG_SLEEP_THREADS_VALID 0x28026c
4294/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
4295#define XSEM_REG_SLOW_EXT_STORE_EMPTY 0x2802a0
4296/* [RW 16] List of free threads. There is a bit per thread. */
4297#define XSEM_REG_THREADS_LIST 0x2802e4
4298/* [RW 3] The arbitration scheme of time_slot 0 */
4299#define XSEM_REG_TS_0_AS 0x280038
4300/* [RW 3] The arbitration scheme of time_slot 10 */
4301#define XSEM_REG_TS_10_AS 0x280060
4302/* [RW 3] The arbitration scheme of time_slot 11 */
4303#define XSEM_REG_TS_11_AS 0x280064
4304/* [RW 3] The arbitration scheme of time_slot 12 */
4305#define XSEM_REG_TS_12_AS 0x280068
4306/* [RW 3] The arbitration scheme of time_slot 13 */
4307#define XSEM_REG_TS_13_AS 0x28006c
4308/* [RW 3] The arbitration scheme of time_slot 14 */
4309#define XSEM_REG_TS_14_AS 0x280070
4310/* [RW 3] The arbitration scheme of time_slot 15 */
4311#define XSEM_REG_TS_15_AS 0x280074
4312/* [RW 3] The arbitration scheme of time_slot 16 */
4313#define XSEM_REG_TS_16_AS 0x280078
4314/* [RW 3] The arbitration scheme of time_slot 17 */
4315#define XSEM_REG_TS_17_AS 0x28007c
4316/* [RW 3] The arbitration scheme of time_slot 18 */
4317#define XSEM_REG_TS_18_AS 0x280080
4318/* [RW 3] The arbitration scheme of time_slot 1 */
4319#define XSEM_REG_TS_1_AS 0x28003c
4320/* [RW 3] The arbitration scheme of time_slot 2 */
4321#define XSEM_REG_TS_2_AS 0x280040
4322/* [RW 3] The arbitration scheme of time_slot 3 */
4323#define XSEM_REG_TS_3_AS 0x280044
4324/* [RW 3] The arbitration scheme of time_slot 4 */
4325#define XSEM_REG_TS_4_AS 0x280048
4326/* [RW 3] The arbitration scheme of time_slot 5 */
4327#define XSEM_REG_TS_5_AS 0x28004c
4328/* [RW 3] The arbitration scheme of time_slot 6 */
4329#define XSEM_REG_TS_6_AS 0x280050
4330/* [RW 3] The arbitration scheme of time_slot 7 */
4331#define XSEM_REG_TS_7_AS 0x280054
4332/* [RW 3] The arbitration scheme of time_slot 8 */
4333#define XSEM_REG_TS_8_AS 0x280058
4334/* [RW 3] The arbitration scheme of time_slot 9 */
4335#define XSEM_REG_TS_9_AS 0x28005c
4336/* [RW 32] Interrupt mask register #0 read/write */
4337#define XSEM_REG_XSEM_INT_MASK_0 0x280110
4338#define XSEM_REG_XSEM_INT_MASK_1 0x280120
4339/* [R 32] Interrupt register #0 read */
4340#define XSEM_REG_XSEM_INT_STS_0 0x280104
4341#define XSEM_REG_XSEM_INT_STS_1 0x280114
4342/* [RW 32] Parity mask register #0 read/write */
4343#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130
4344#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140
4345/* [R 32] Parity register #0 read */
4346#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
4347#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
4348#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
4349#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
4350#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
4351#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0)
4352#define MCPR_NVM_COMMAND_DOIT (1L<<4)
4353#define MCPR_NVM_COMMAND_DONE (1L<<3)
4354#define MCPR_NVM_COMMAND_FIRST (1L<<7)
4355#define MCPR_NVM_COMMAND_LAST (1L<<8)
4356#define MCPR_NVM_COMMAND_WR (1L<<5)
4357#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9)
4358#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5)
4359#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1)
4360#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3)
4361#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
4362#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3)
4363#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3)
4364#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3)
4365#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3)
4366#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3)
4367#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3)
4368#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3)
4369#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3)
4370#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3)
4371#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
4372#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
4373#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
4374#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
4375#define EMAC_LED_100MB_OVERRIDE (1L<<2)
4376#define EMAC_LED_10MB_OVERRIDE (1L<<3)
4377#define EMAC_LED_2500MB_OVERRIDE (1L<<12)
4378#define EMAC_LED_OVERRIDE (1L<<0)
4379#define EMAC_LED_TRAFFIC (1L<<6)
4380#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26)
4381#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26)
4382#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26)
4383#define EMAC_MDIO_COMM_DATA (0xffffL<<0)
4384#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
4385#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
4386#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
4387#define EMAC_MDIO_MODE_CLOCK_CNT (0x3fL<<16)
4388#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
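/* Editor's sketch (not part of the original header): an EMAC MDIO transaction
   is started by writing a command word with EMAC_MDIO_COMM_START_BUSY set into
   EMAC_REG_EMAC_MDIO_COMM (defined below) and then polling until the hardware
   clears the busy bit. The PHY/device-address field placement is omitted here;
   "emac_mdio" is an assumed ioremapped pointer to the EMAC block, and
   readl()/udelay() come from the usual kernel io/delay headers. */
static inline int emac_mdio_wait_not_busy(void __iomem *emac_mdio)
{
	int i;

	for (i = 0; i < 50; i++) {
		udelay(10);
		if (!(readl(emac_mdio + EMAC_REG_EMAC_MDIO_COMM) &
		      EMAC_MDIO_COMM_START_BUSY))
			return 0;		/* transaction completed */
	}
	return -1;				/* timed out, still busy */
}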
4389#define EMAC_MODE_25G_MODE (1L<<5)
4390#define EMAC_MODE_HALF_DUPLEX (1L<<1)
4391#define EMAC_MODE_PORT_GMII (2L<<2)
4392#define EMAC_MODE_PORT_MII (1L<<2)
4393#define EMAC_MODE_PORT_MII_10M (3L<<2)
4394#define EMAC_MODE_RESET (1L<<0)
4395#define EMAC_REG_EMAC_LED 0xc
4396#define EMAC_REG_EMAC_MAC_MATCH 0x10
4397#define EMAC_REG_EMAC_MDIO_COMM 0xac
4398#define EMAC_REG_EMAC_MDIO_MODE 0xb4
4399#define EMAC_REG_EMAC_MODE 0x0
4400#define EMAC_REG_EMAC_RX_MODE 0xc8
4401#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c
4402#define EMAC_REG_EMAC_RX_STAT_AC 0x180
4403#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4
4404#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23
4405#define EMAC_REG_EMAC_TX_MODE 0xbc
4406#define EMAC_REG_EMAC_TX_STAT_AC 0x280
4407#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
4408#define EMAC_RX_MODE_FLOW_EN (1L<<2)
4409#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
4410#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
4411#define EMAC_RX_MODE_RESET (1L<<0)
4412#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
4413#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
4414#define EMAC_TX_MODE_FLOW_EN (1L<<4)
4415#define EMAC_TX_MODE_RESET (1L<<0)
4416#define MISC_REGISTERS_GPIO_0 0
4417#define MISC_REGISTERS_GPIO_1 1
4418#define MISC_REGISTERS_GPIO_2 2
4419#define MISC_REGISTERS_GPIO_3 3
4420#define MISC_REGISTERS_GPIO_CLR_POS 16
4421#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24)
4422#define MISC_REGISTERS_GPIO_FLOAT_POS 24
4423#define MISC_REGISTERS_GPIO_HIGH 1
4424#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2
4425#define MISC_REGISTERS_GPIO_INT_CLR_POS 24
4426#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0
4427#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1
4428#define MISC_REGISTERS_GPIO_INT_SET_POS 16
4429#define MISC_REGISTERS_GPIO_LOW 0
4430#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1
4431#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0
4432#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
4433#define MISC_REGISTERS_GPIO_SET_POS 8
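/* Editor's sketch (not part of the original header): the *_POS values above
   give the bit position of each per-pin field inside the MISC GPIO register,
   and MISC_REGISTERS_GPIO_PORT_SHIFT moves a pin number to the second port's
   field. Driving one pin high could be composed as follows (the register
   read-modify-write and any required locking are omitted): */
static inline u32 misc_gpio_drive_high(u32 gpio_reg, u32 gpio_num, u8 port)
{
	u32 mask = 1 << (gpio_num +
			 (port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));

	gpio_reg &= ~(mask << MISC_REGISTERS_GPIO_FLOAT_POS);	/* stop floating */
	gpio_reg |= (mask << MISC_REGISTERS_GPIO_SET_POS);	/* drive high */
	return gpio_reg;
}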
4434#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
4435#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
4436#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
4437#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
4438#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
4439#define MISC_REGISTERS_RESET_REG_1_SET 0x584
4440#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
4441#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
4442#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
4443#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
4444#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
4445#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
4446#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
4447#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
4448#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
4449#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
4450#define MISC_REGISTERS_RESET_REG_2_SET 0x594
4451#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
4452#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
4453#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2)
4454#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3)
4455#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0)
4456#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5)
4457#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6)
4458#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7)
4459#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4)
4460#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8)
4461#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4
4462#define MISC_REGISTERS_SPIO_4 4
4463#define MISC_REGISTERS_SPIO_5 5
4464#define MISC_REGISTERS_SPIO_7 7
4465#define MISC_REGISTERS_SPIO_CLR_POS 16
4466#define MISC_REGISTERS_SPIO_FLOAT (0xffL<<24)
4467#define MISC_REGISTERS_SPIO_FLOAT_POS 24
4468#define MISC_REGISTERS_SPIO_INPUT_HI_Z 2
4469#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS 16
4470#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
4471#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
4472#define MISC_REGISTERS_SPIO_SET_POS 8
4473#define HW_LOCK_MAX_RESOURCE_VALUE 31
4474#define HW_LOCK_RESOURCE_GPIO 1
4475#define HW_LOCK_RESOURCE_MDIO 0
4476#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
4477#define HW_LOCK_RESOURCE_RESERVED_08 8
4478#define HW_LOCK_RESOURCE_SPIO 2
4479#define HW_LOCK_RESOURCE_UNDI 5
4480#define PRS_FLAG_OVERETH_IPV4 1
4481#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
4482#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31)
4483#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9)
4484#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (1<<8)
4485#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (1<<7)
4486#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (1<<6)
4487#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (1<<29)
4488#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (1<<28)
4489#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (1<<1)
4490#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (1<<0)
4491#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (1<<18)
4492#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (1<<11)
4493#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (1<<13)
4494#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (1<<12)
4495#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5)
4496#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9)
4497#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12)
4498#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (1<<28)
4499#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (1<<31)
4500#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (1<<29)
4501#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (1<<30)
4502#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15)
4503#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14)
4504#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20)
4505#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (1<<0)
4506#define AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT (1<<31)
4507#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (1<<3)
4508#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (1<<2)
4509#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (1<<5)
4510#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (1<<4)
4511#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (1<<3)
4512#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (1<<2)
4513#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (1<<22)
4514#define AEU_INPUTS_ATTN_BITS_SPIO5 (1<<15)
4515#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (1<<27)
4516#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (1<<5)
4517#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (1<<25)
4518#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (1<<24)
4519#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (1<<29)
4520#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (1<<28)
4521#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (1<<23)
4522#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (1<<27)
4523#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (1<<26)
4524#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (1<<21)
4525#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (1<<20)
4526#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (1<<25)
4527#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (1<<24)
4528#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (1<<16)
4529#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (1<<9)
4530#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (1<<7)
4531#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (1<<6)
4532#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (1<<11)
4533#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (1<<10)
4534#define RESERVED_GENERAL_ATTENTION_BIT_0 0
4535
4536#define EVEREST_GEN_ATTN_IN_USE_MASK 0x3ffe0
4537#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000
4538
4539#define RESERVED_GENERAL_ATTENTION_BIT_6 6
4540#define RESERVED_GENERAL_ATTENTION_BIT_7 7
4541#define RESERVED_GENERAL_ATTENTION_BIT_8 8
4542#define RESERVED_GENERAL_ATTENTION_BIT_9 9
4543#define RESERVED_GENERAL_ATTENTION_BIT_10 10
4544#define RESERVED_GENERAL_ATTENTION_BIT_11 11
4545#define RESERVED_GENERAL_ATTENTION_BIT_12 12
4546#define RESERVED_GENERAL_ATTENTION_BIT_13 13
4547#define RESERVED_GENERAL_ATTENTION_BIT_14 14
4548#define RESERVED_GENERAL_ATTENTION_BIT_15 15
4549#define RESERVED_GENERAL_ATTENTION_BIT_16 16
4550#define RESERVED_GENERAL_ATTENTION_BIT_17 17
4551#define RESERVED_GENERAL_ATTENTION_BIT_18 18
4552#define RESERVED_GENERAL_ATTENTION_BIT_19 19
4553#define RESERVED_GENERAL_ATTENTION_BIT_20 20
4554#define RESERVED_GENERAL_ATTENTION_BIT_21 21
4555
4556/* storm asserts attention bits */
4557#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7
4558#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8
4559#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9
4560#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10
4561
4562/* mcp error attention bit */
4563#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11
4564
4565/*E1H NIG status sync attention mapped to group 4-7*/
4566#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12
4567#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13
4568#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14
4569#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15
4570#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16
4571#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17
4572#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18
4573#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19
4574
4575
4576#define LATCHED_ATTN_RBCR 23
4577#define LATCHED_ATTN_RBCT 24
4578#define LATCHED_ATTN_RBCN 25
4579#define LATCHED_ATTN_RBCU 26
4580#define LATCHED_ATTN_RBCP 27
4581#define LATCHED_ATTN_TIMEOUT_GRC 28
4582#define LATCHED_ATTN_RSVD_GRC 29
4583#define LATCHED_ATTN_ROM_PARITY_MCP 30
4584#define LATCHED_ATTN_UM_RX_PARITY_MCP 31
4585#define LATCHED_ATTN_UM_TX_PARITY_MCP 32
4586#define LATCHED_ATTN_SCPAD_PARITY_MCP 33
4587
4588#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32)
4589#define GENERAL_ATTEN_OFFSET(atten_name)\
4590 (1UL << ((94 + atten_name) % 32))
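/* Worked example (editor's addition): MCP_FATAL_ASSERT_ATTENTION_BIT above is
   RESERVED_GENERAL_ATTENTION_BIT_11, i.e. 11, so
   GENERAL_ATTEN_WORD(11) = (94 + 11) / 32 = 3 (the fourth 32-bit word) and
   GENERAL_ATTEN_OFFSET(11) = 1UL << ((94 + 11) % 32) = 1UL << 9 = 0x200. */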
4591/*
4592 * This file defines GRC base address for every block.
4593 * This file is included by chipsim, asm microcode and cpp microcode.
4594 * These values are used in Design.xml on regBase attribute
4595 * Use the base with the generated offsets of specific registers.
4596 */
4597
4598#define GRCBASE_PXPCS 0x000000
4599#define GRCBASE_PCICONFIG 0x002000
4600#define GRCBASE_PCIREG 0x002400
4601#define GRCBASE_EMAC0 0x008000
4602#define GRCBASE_EMAC1 0x008400
4603#define GRCBASE_DBU 0x008800
4604#define GRCBASE_MISC 0x00A000
4605#define GRCBASE_DBG 0x00C000
4606#define GRCBASE_NIG 0x010000
4607#define GRCBASE_XCM 0x020000
4608#define GRCBASE_PRS 0x040000
4609#define GRCBASE_SRCH 0x040400
4610#define GRCBASE_TSDM 0x042000
4611#define GRCBASE_TCM 0x050000
4612#define GRCBASE_BRB1 0x060000
4613#define GRCBASE_MCP 0x080000
4614#define GRCBASE_UPB 0x0C1000
4615#define GRCBASE_CSDM 0x0C2000
4616#define GRCBASE_USDM 0x0C4000
4617#define GRCBASE_CCM 0x0D0000
4618#define GRCBASE_UCM 0x0E0000
4619#define GRCBASE_CDU 0x101000
4620#define GRCBASE_DMAE 0x102000
4621#define GRCBASE_PXP 0x103000
4622#define GRCBASE_CFC 0x104000
4623#define GRCBASE_HC 0x108000
4624#define GRCBASE_PXP2 0x120000
4625#define GRCBASE_PBF 0x140000
4626#define GRCBASE_XPB 0x161000
4627#define GRCBASE_TIMERS 0x164000
4628#define GRCBASE_XSDM 0x166000
4629#define GRCBASE_QM 0x168000
4630#define GRCBASE_DQ 0x170000
4631#define GRCBASE_TSEM 0x180000
4632#define GRCBASE_CSEM 0x200000
4633#define GRCBASE_XSEM 0x280000
4634#define GRCBASE_USEM 0x300000
4635#define GRCBASE_MISC_AEU GRCBASE_MISC
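/* Editor's note: the XCM/XSDM/XSEM register addresses earlier in this file are
   already absolute GRC offsets, i.e. block base plus block-relative offset.
   For example XSEM_REG_ARB_CYCLE_SIZE (0x280034) is GRCBASE_XSEM (0x280000)
   + 0x34, and XSDM_REG_TIMER_TICK (0x166000) is GRCBASE_XSDM + 0x0. */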
4636
4637
4638/* offset of configuration space in the pci core register */
4639#define PCICFG_OFFSET 0x2000
4640#define PCICFG_VENDOR_ID_OFFSET 0x00
4641#define PCICFG_DEVICE_ID_OFFSET 0x02
4642#define PCICFG_COMMAND_OFFSET 0x04
4643#define PCICFG_COMMAND_IO_SPACE (1<<0)
4644#define PCICFG_COMMAND_MEM_SPACE (1<<1)
4645#define PCICFG_COMMAND_BUS_MASTER (1<<2)
4646#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
4647#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
4648#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
4649#define PCICFG_COMMAND_PERR_ENA (1<<6)
4650#define PCICFG_COMMAND_STEPPING (1<<7)
4651#define PCICFG_COMMAND_SERR_ENA (1<<8)
4652#define PCICFG_COMMAND_FAST_B2B (1<<9)
4653#define PCICFG_COMMAND_INT_DISABLE (1<<10)
4654#define PCICFG_COMMAND_RESERVED (0x1f<<11)
4655#define PCICFG_STATUS_OFFSET 0x06
4656#define PCICFG_REVESION_ID_OFFSET 0x08
4657#define PCICFG_CACHE_LINE_SIZE 0x0c
4658#define PCICFG_LATENCY_TIMER 0x0d
4659#define PCICFG_BAR_1_LOW 0x10
4660#define PCICFG_BAR_1_HIGH 0x14
4661#define PCICFG_BAR_2_LOW 0x18
4662#define PCICFG_BAR_2_HIGH 0x1c
4663#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
4664#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
4665#define PCICFG_INT_LINE 0x3c
4666#define PCICFG_INT_PIN 0x3d
4667#define PCICFG_PM_CAPABILITY 0x48
4668#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
4669#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
4670#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
4671#define PCICFG_PM_CAPABILITY_DSI (1<<21)
4672#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
4673#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
4674#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
4675#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
4676#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
4677#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
4678#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
4679#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
4680#define PCICFG_PM_CSR_OFFSET 0x4c
4681#define PCICFG_PM_CSR_STATE (0x3<<0)
4682#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
4683#define PCICFG_PM_CSR_PME_STATUS (1<<15)
4684#define PCICFG_MSI_CAP_ID_OFFSET 0x58
4685#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16)
4686#define PCICFG_MSI_CONTROL_MCAP (0x7<<17)
4687#define PCICFG_MSI_CONTROL_MENA (0x7<<20)
4688#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23)
4689#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24)
4690#define PCICFG_GRC_ADDRESS 0x78
4691#define PCICFG_GRC_DATA 0x80
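/* Editor's sketch (not part of the original header): PCICFG_GRC_ADDRESS and
   PCICFG_GRC_DATA form an indirect window into GRC space through PCI
   configuration cycles, usable before (or without) mapping BAR0. A minimal
   read helper, assuming a struct pci_dev *pdev, might look like: */
static inline u32 grc_reg_rd_ind(struct pci_dev *pdev, u32 addr)
{
	u32 val;

	pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(pdev, PCICFG_GRC_DATA, &val);
	/* park the window back on the vendor ID dword */
	pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	return val;
}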
4692#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0
4693#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16)
4694#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27)
4695#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30)
4696#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31)
4697
4698#define PCICFG_DEVICE_CONTROL 0xb4
4699#define PCICFG_DEVICE_STATUS 0xb6
4700#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0)
4701#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1)
4702#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2)
4703#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3)
4704#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4)
4705#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5)
4706#define PCICFG_LINK_CONTROL 0xbc
4707
4708
4709#define BAR_USTRORM_INTMEM 0x400000
4710#define BAR_CSTRORM_INTMEM 0x410000
4711#define BAR_XSTRORM_INTMEM 0x420000
4712#define BAR_TSTRORM_INTMEM 0x430000
4713
4714/* for accessing the IGU in case of status block ACK */
4715#define BAR_IGU_INTMEM 0x440000
4716
4717#define BAR_DOORBELL_OFFSET 0x800000
4718
4719#define BAR_ME_REGISTER 0x450000
4720
4721/* config_2 offset */
4722#define GRC_CONFIG_2_SIZE_REG 0x408
4723#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
4724#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
4725#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
4726#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
4727#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0)
4728#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0)
4729#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0)
4730#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0)
4731#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0)
4732#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0)
4733#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0)
4734#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0)
4735#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0)
4736#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0)
4737#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
4738#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
4739#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
4740#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
4741#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
4742#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
4743#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
4744#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
4745#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
4746#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
4747#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
4748#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8)
4749#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8)
4750#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8)
4751#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8)
4752#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8)
4753#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8)
4754#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8)
4755#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8)
4756#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8)
4757#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8)
4758#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
4759#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
4760#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
4761#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
4762#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
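/* Editor's note: the BAR1 size encoding above doubles per step, so a non-zero
   value n corresponds to 64K << (n - 1) (1 -> 64K ... 15 -> 1G), and the
   expansion-ROM size encoding is 2K << (n - 1) (1 -> 2K ... 15 -> 32M);
   value 0 means disabled in both fields. */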
4763
4764/* config_3 offset */
4765#define GRC_CONFIG_3_SIZE_REG 0x40c
4766#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
4767#define PCI_CONFIG_3_FORCE_PME (1L<<24)
4768#define PCI_CONFIG_3_PME_STATUS (1L<<25)
4769#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
4770#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
4771#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
4772#define PCI_CONFIG_3_PCI_POWER (1L<<31)
4773
4774#define GRC_BAR2_CONFIG 0x4e0
4775#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
4776#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
4777#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
4778#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
4779#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
4780#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
4781#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
4782#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
4783#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
4784#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
4785#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
4786#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
4787#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
4788#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
4789#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
4790#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
4791#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
4792#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
4793
4794#define PCI_PM_DATA_A 0x410
4795#define PCI_PM_DATA_B 0x414
4796#define PCI_ID_VAL1 0x434
4797#define PCI_ID_VAL2 0x438
4798
4799
4800#define MDIO_REG_BANK_CL73_IEEEB0 0x0
4801#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
4802#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
4803#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
4804#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
4805
4806#define MDIO_REG_BANK_CL73_IEEEB1 0x10
4807#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
4808#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
4809#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
4810#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
4811#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
4812#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
4813#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
4814#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
4815#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
4816#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
4817#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
4818#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
4819#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
4820#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
4821#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
4822
4823#define MDIO_REG_BANK_RX0 0x80b0
4824#define MDIO_RX0_RX_STATUS 0x10
4825#define MDIO_RX0_RX_STATUS_SIGDET 0x8000
4826#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000
4827#define MDIO_RX0_RX_EQ_BOOST 0x1c
4828#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
4829#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10
4830
4831#define MDIO_REG_BANK_RX1 0x80c0
4832#define MDIO_RX1_RX_EQ_BOOST 0x1c
4833#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
4834#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10
4835
4836#define MDIO_REG_BANK_RX2 0x80d0
4837#define MDIO_RX2_RX_EQ_BOOST 0x1c
4838#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
4839#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10
4840
4841#define MDIO_REG_BANK_RX3 0x80e0
4842#define MDIO_RX3_RX_EQ_BOOST 0x1c
4843#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
4844#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10
4845
4846#define MDIO_REG_BANK_RX_ALL 0x80f0
4847#define MDIO_RX_ALL_RX_EQ_BOOST 0x1c
4848#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
4849#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
4850
4851#define MDIO_REG_BANK_TX0 0x8060
4852#define MDIO_TX0_TX_DRIVER 0x17
4853#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
4854#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
4855#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
4856#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
4857#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
4858#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
4859#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
4860#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
4861#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
4862
4863#define MDIO_REG_BANK_TX1 0x8070
4864#define MDIO_TX1_TX_DRIVER 0x17
4865#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
4866#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
4867#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
4868#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
4869#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
4870#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
4871#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
4872#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
4873#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
4874
4875#define MDIO_REG_BANK_TX2 0x8080
4876#define MDIO_TX2_TX_DRIVER 0x17
4877#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
4878#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
4879#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
4880#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
4881#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
4882#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
4883#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
4884#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
4885#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
4886
4887#define MDIO_REG_BANK_TX3 0x8090
4888#define MDIO_TX3_TX_DRIVER 0x17
4889#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
4890#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
4891#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
4892#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
4893#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
4894#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
4895#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
4896#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
4897#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
4898
4899#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000
4900#define MDIO_BLOCK0_XGXS_CONTROL 0x10
4901
4902#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010
4903#define MDIO_BLOCK1_LANE_CTRL0 0x15
4904#define MDIO_BLOCK1_LANE_CTRL1 0x16
4905#define MDIO_BLOCK1_LANE_CTRL2 0x17
4906#define MDIO_BLOCK1_LANE_PRBS 0x19
4907
4908#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100
4909#define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10
4910#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000
4911#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
4912#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
4913#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
4914#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
4915#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
4916#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
4917#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
4918
4919#define MDIO_REG_BANK_GP_STATUS 0x8120
4920#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B
4921#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
4922#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
4923#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
4924#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
4925#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
4926#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
4927#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
4928#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
4929#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
4930#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
4931#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
4932#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
4933#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
4934#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
4935#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
4936#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
4937#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
4938#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
4939#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
4940#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
4941#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
4942#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
4943#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
4944#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
4945
4946
4947#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
4948#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
4949#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000
4950#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
4951#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
4952#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
4953#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1)
4954
4955#define MDIO_REG_BANK_SERDES_DIGITAL 0x8300
4956#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10
4957#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001
4958#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002
4959#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004
4960#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008
4961#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010
4962#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020
4963#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11
4964#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
4965#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
4966#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
4967#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
4968#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
4969#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
4970#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018
4971#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
4972#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
4973#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
4974#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15
4975#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002
4976#define MDIO_SERDES_DIGITAL_MISC1 0x18
4977#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
4978#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
4979#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000
4980#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000
4981#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000
4982#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000
4983#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010
4984#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f
4985#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000
4986#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001
4987#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002
4988#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003
4989#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004
4990#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005
4991#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006
4992#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007
4993#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008
4994#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009
4995
4996#define MDIO_REG_BANK_OVER_1G 0x8320
4997#define MDIO_OVER_1G_DIGCTL_3_4 0x14
4998#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0
4999#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5
5000#define MDIO_OVER_1G_UP1 0x19
5001#define MDIO_OVER_1G_UP1_2_5G 0x0001
5002#define MDIO_OVER_1G_UP1_5G 0x0002
5003#define MDIO_OVER_1G_UP1_6G 0x0004
5004#define MDIO_OVER_1G_UP1_10G 0x0010
5005#define MDIO_OVER_1G_UP1_10GH 0x0008
5006#define MDIO_OVER_1G_UP1_12G 0x0020
5007#define MDIO_OVER_1G_UP1_12_5G 0x0040
5008#define MDIO_OVER_1G_UP1_13G 0x0080
5009#define MDIO_OVER_1G_UP1_15G 0x0100
5010#define MDIO_OVER_1G_UP1_16G 0x0200
5011#define MDIO_OVER_1G_UP2 0x1A
5012#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007
5013#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038
5014#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0
5015#define MDIO_OVER_1G_UP3 0x1B
5016#define MDIO_OVER_1G_UP3_HIGIG2 0x0001
5017#define MDIO_OVER_1G_LP_UP1 0x1C
5018#define MDIO_OVER_1G_LP_UP2 0x1D
5019#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff
5020#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780
5021#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7
5022#define MDIO_OVER_1G_LP_UP3 0x1E
5023
5024#define MDIO_REG_BANK_REMOTE_PHY 0x8330
5025#define MDIO_REMOTE_PHY_MISC_RX_STATUS 0x10
5026#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG 0x0010
5027#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG 0x0600
5028
5029#define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350
5030#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10
5031#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001
5032#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002
5033
5034#define MDIO_REG_BANK_CL73_USERB0 0x8370
5035#define MDIO_CL73_USERB0_CL73_UCTRL 0x10
5036#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL 0x0002
5037#define MDIO_CL73_USERB0_CL73_USTAT1 0x11
5038#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK 0x0100
5039#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37 0x0400
5040#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12
5041#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000
5042#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000
5043#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000
5044#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14
5045#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001
5046
5047#define MDIO_REG_BANK_AER_BLOCK 0xFFD0
5048#define MDIO_AER_BLOCK_AER_REG 0x1E
5049
5050#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0
5051#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10
5052#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040
5053#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000
5054#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000
5055#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040
5056#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100
5057#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200
5058#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000
5059#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000
5060#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000
5061#define MDIO_COMBO_IEEE0_MII_STATUS 0x11
5062#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004
5063#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020
5064#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14
5065#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020
5066#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040
5067#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180
5068#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000
5069#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080
5070#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100
5071#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180
5072#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000
5073#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15
5074#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000
5075#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000
5076#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180
5077#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000
5078#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180
5079#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040
5080#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020
5081/* When the link partner is in SGMII mode (bit 0 = 1), then
5082 bit 15 = link, bit 12 = duplex, bits 11:10 = speed, bit 14 = acknowledge.
5083 The other bits are reserved and should be zero */
5084#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
5085
5086
5087#define MDIO_PMA_DEVAD 0x1
5088/*ieee*/
5089#define MDIO_PMA_REG_CTRL 0x0
5090#define MDIO_PMA_REG_STATUS 0x1
5091#define MDIO_PMA_REG_10G_CTRL2 0x7
5092#define MDIO_PMA_REG_RX_SD 0xa
5093/*bcm*/
5094#define MDIO_PMA_REG_BCM_CTRL 0x0096
5095#define MDIO_PMA_REG_FEC_CTRL 0x00ab
5096#define MDIO_PMA_REG_RX_ALARM_CTRL 0x9000
5097#define MDIO_PMA_REG_LASI_CTRL 0x9002
5098#define MDIO_PMA_REG_RX_ALARM 0x9003
5099#define MDIO_PMA_REG_TX_ALARM 0x9004
5100#define MDIO_PMA_REG_LASI_STATUS 0x9005
5101#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800
5102#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808
5103#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809
5104#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02
5105#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09
5106#define MDIO_PMA_REG_MISC_CTRL 0xca0a
5107#define MDIO_PMA_REG_GEN_CTRL 0xca10
5108#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
5109#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
5110#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
5111#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
5112#define MDIO_PMA_REG_ROM_VER1 0xca19
5113#define MDIO_PMA_REG_ROM_VER2 0xca1a
5114#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
5115#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d
5116#define MDIO_PMA_REG_PLL_CTRL 0xca1e
5117#define MDIO_PMA_REG_MISC_CTRL0 0xca23
5118#define MDIO_PMA_REG_LRM_MODE 0xca3f
5119#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46
5120#define MDIO_PMA_REG_MISC_CTRL1 0xca85
5121
5122#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000
5123#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c
5124#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000
5125#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004
5126#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008
5127#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c
5128#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002
5129#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003
5130#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820
5131#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
5132#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01
5133#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05
5134
5135#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
5136#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
5137#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
5138#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309
5139#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
5140#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
5141#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
5142#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
5143
5144#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
5145#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
5146#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
5147
5148#define MDIO_PMA_REG_7101_RESET 0xc000
5149#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
5150#define MDIO_PMA_REG_7101_VER1 0xc026
5151#define MDIO_PMA_REG_7101_VER2 0xc027
5152
5153#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
5154#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
5155#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
5156#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
5157#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
5158#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
5159#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
5160
5161
5162#define MDIO_WIS_DEVAD 0x2
5163/*bcm*/
5164#define MDIO_WIS_REG_LASI_CNTL 0x9002
5165#define MDIO_WIS_REG_LASI_STATUS 0x9005
5166
5167#define MDIO_PCS_DEVAD 0x3
5168#define MDIO_PCS_REG_STATUS 0x0020
5169#define MDIO_PCS_REG_LASI_STATUS 0x9005
5170#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000
5171#define MDIO_PCS_REG_7101_SPI_MUX 0xD008
5172#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
5173#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
5174#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
5175#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
5176#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7)
5177#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
5178#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
5179
5180
5181#define MDIO_XS_DEVAD 0x4
5182#define MDIO_XS_PLL_SEQUENCER 0x8000
5183#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a
5184
5185#define MDIO_XS_8706_REG_BANK_RX0 0x80bc
5186#define MDIO_XS_8706_REG_BANK_RX1 0x80cc
5187#define MDIO_XS_8706_REG_BANK_RX2 0x80dc
5188#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
5189#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
5190
5191#define MDIO_AN_DEVAD 0x7
5192/*ieee*/
5193#define MDIO_AN_REG_CTRL 0x0000
5194#define MDIO_AN_REG_STATUS 0x0001
5195#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020
5196#define MDIO_AN_REG_ADV_PAUSE 0x0010
5197#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400
5198#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800
5199#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00
5200#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
5201#define MDIO_AN_REG_ADV 0x0011
5202#define MDIO_AN_REG_ADV2 0x0012
5203#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
5204#define MDIO_AN_REG_MASTER_STATUS 0x0021
5205/*bcm*/
5206#define MDIO_AN_REG_LINK_STATUS 0x8304
5207#define MDIO_AN_REG_CL37_CL73 0x8370
5208#define MDIO_AN_REG_CL37_AN 0xffe0
5209#define MDIO_AN_REG_CL37_FC_LD 0xffe4
5210#define MDIO_AN_REG_CL37_FC_LP 0xffe5
5211
5212#define MDIO_AN_REG_8073_2_5G 0x8329
5213
5214#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
5215#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
5216#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
5217#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
5218#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
5219#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
5220
5221#define IGU_FUNC_BASE 0x0400
5222
5223#define IGU_ADDR_MSIX 0x0000
5224#define IGU_ADDR_INT_ACK 0x0200
5225#define IGU_ADDR_PROD_UPD 0x0201
5226#define IGU_ADDR_ATTN_BITS_UPD 0x0202
5227#define IGU_ADDR_ATTN_BITS_SET 0x0203
5228#define IGU_ADDR_ATTN_BITS_CLR 0x0204
5229#define IGU_ADDR_COALESCE_NOW 0x0205
5230#define IGU_ADDR_SIMD_MASK 0x0206
5231#define IGU_ADDR_SIMD_NOMASK 0x0207
5232#define IGU_ADDR_MSI_CTL 0x0210
5233#define IGU_ADDR_MSI_ADDR_LO 0x0211
5234#define IGU_ADDR_MSI_ADDR_HI 0x0212
5235#define IGU_ADDR_MSI_DATA 0x0213
5236
5237#define IGU_INT_ENABLE 0
5238#define IGU_INT_DISABLE 1
5239#define IGU_INT_NOP 2
5240#define IGU_INT_NOP2 3
5241
5242#define COMMAND_REG_INT_ACK 0x0
5243#define COMMAND_REG_PROD_UPD 0x4
5244#define COMMAND_REG_ATTN_BITS_UPD 0x8
5245#define COMMAND_REG_ATTN_BITS_SET 0xc
5246#define COMMAND_REG_ATTN_BITS_CLR 0x10
5247#define COMMAND_REG_COALESCE_NOW 0x14
5248#define COMMAND_REG_SIMD_MASK 0x18
5249#define COMMAND_REG_SIMD_NOMASK 0x1c
5250
5251
5252#define IGU_MEM_BASE 0x0000
5253
5254#define IGU_MEM_MSIX_BASE 0x0000
5255#define IGU_MEM_MSIX_UPPER 0x007f
5256#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
5257
5258#define IGU_MEM_PBA_MSIX_BASE 0x0200
5259#define IGU_MEM_PBA_MSIX_UPPER 0x0200
5260
5261#define IGU_CMD_BACKWARD_COMP_PROD_UPD 0x0201
5262#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
5263
5264#define IGU_CMD_INT_ACK_BASE 0x0400
5265#define IGU_CMD_INT_ACK_UPPER\
5266 (IGU_CMD_INT_ACK_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
5267#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x04ff
5268
5269#define IGU_CMD_E2_PROD_UPD_BASE 0x0500
5270#define IGU_CMD_E2_PROD_UPD_UPPER\
5271 (IGU_CMD_E2_PROD_UPD_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
5272#define IGU_CMD_E2_PROD_UPD_RESERVED_UPPER 0x059f
5273
5274#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05a0
5275#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05a1
5276#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05a2
5277
5278#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05a3
5279#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05a4
5280#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05a5
5281#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
5282
5283#define IGU_REG_RESERVED_UPPER 0x05ff
5284
5285
5286#define CDU_REGION_NUMBER_XCM_AG 2
5287#define CDU_REGION_NUMBER_UCM_AG 4
5288
5289
5290/**
5291 * String-to-compress [31:8] = CID (all 24 bits)
5292 * String-to-compress [7:4] = Region
5293 * String-to-compress [3:0] = Type
5294 */
5295#define CDU_VALID_DATA(_cid, _region, _type)\
5296 (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
5297#define CDU_CRC8(_cid, _region, _type)\
5298 (calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
5299#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type)\
5300 (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
5301#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type)\
5302 (0x80 | (((_type) & 0xf) << 3) | ((_crc) & 0x7))
5303#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
5304
5305/******************************************************************************
5306 * Description:
5307 * Calculates crc 8 on a word value: polynomial 0-1-2-8
5308 * Code was translated from Verilog.
5309 * Return: the updated crc 8 value
5310 *****************************************************************************/
5311static inline u8 calc_crc8(u32 data, u8 crc)
5312{
5313 u8 D[32];
5314 u8 NewCRC[8];
5315 u8 C[8];
5316 u8 crc_res;
5317 u8 i;
5318
5319 /* split the data into 32 bits */
5320 for (i = 0; i < 32; i++) {
5321 D[i] = (u8)(data & 1);
5322 data = data >> 1;
5323 }
5324
5325 /* split the crc into 8 bits */
5326 for (i = 0; i < 8; i++) {
5327 C[i] = crc & 1;
5328 crc = crc >> 1;
5329 }
5330
5331 NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
5332 D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
5333 C[6] ^ C[7];
5334 NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
5335 D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
5336 D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^
5337 C[6];
5338 NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
5339 D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
5340 C[0] ^ C[1] ^ C[4] ^ C[5];
5341 NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
5342 D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
5343 C[1] ^ C[2] ^ C[5] ^ C[6];
5344 NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
5345 D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
5346 C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
5347 NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
5348 D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
5349 C[3] ^ C[4] ^ C[7];
5350 NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
5351 D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
5352 C[5];
5353 NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
5354 D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
5355 C[6];
5356
5357 crc_res = 0;
5358 for (i = 0; i < 8; i++)
5359 crc_res |= (NewCRC[i] << i);
5360
5361 return crc_res;
5362}
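/*
 * Illustrative example (annotation; the CID and type values below are
 * made-up placeholders): building a CDU context validation byte with
 * the macros above.
 *
 *	u32 cid = 0x12345;	// 24-bit connection ID
 *	u8 valid = CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG, 1);
 *	// valid == 0x80 | (calc_crc8(CDU_VALID_DATA(cid, 2, 1), 0xff) & 0x7f)
 *	// Bit 7 marks the context as valid; it can be cleared again with
 *	// CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(valid).
 */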
5363
5364
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
new file mode 100644
index 00000000000..c7472446102
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -0,0 +1,1411 @@
1/* bnx2x_stats.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17#include "bnx2x_cmn.h"
18#include "bnx2x_stats.h"
19
20/* Statistics */
21
22/****************************************************************************
23* Macros
24****************************************************************************/
25
26/* sum[hi:lo] += add[hi:lo] */
27#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
28 do { \
29 s_lo += a_lo; \
30 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
31 } while (0)
32
33/* difference = minuend - subtrahend */
34#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
35 do { \
36 if (m_lo < s_lo) { \
37 /* underflow */ \
38 d_hi = m_hi - s_hi; \
39 if (d_hi > 0) { \
40 /* we can 'loan' 1 */ \
41 d_hi--; \
42 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
43 } else { \
44 /* m_hi <= s_hi */ \
45 d_hi = 0; \
46 d_lo = 0; \
47 } \
48 } else { \
49 /* m_lo >= s_lo */ \
50 if (m_hi < s_hi) { \
51 d_hi = 0; \
52 d_lo = 0; \
53 } else { \
54 /* m_hi >= s_hi */ \
55 d_hi = m_hi - s_hi; \
56 d_lo = m_lo - s_lo; \
57 } \
58 } \
59 } while (0)
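/*
 * Illustrative example (annotation; assumes u32 operands, as used
 * throughout this file): ADD_64 emulates a 64-bit add on counters kept
 * as hi/lo 32-bit halves.
 *
 *	u32 s_hi = 0x1, s_lo = 0xfffffff0;
 *	ADD_64(s_hi, 0, s_lo, 0x20);
 *	// s_lo wraps to 0x10; since 0x10 < 0x20 a carry is detected,
 *	// so s_hi becomes 0x2 and the 64-bit sum is correct.
 *
 * DIFF_64 is the subtraction counterpart, propagating a borrow from the
 * low word into the high word.
 */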
60
61#define UPDATE_STAT64(s, t) \
62 do { \
63 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
64 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
65 pstats->mac_stx[0].t##_hi = new->s##_hi; \
66 pstats->mac_stx[0].t##_lo = new->s##_lo; \
67 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
68 pstats->mac_stx[1].t##_lo, diff.lo); \
69 } while (0)
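/*
 * Note: mac_stx[0] holds the previous raw MAC readout and mac_stx[1]
 * the accumulated totals; UPDATE_STAT64 adds the delta between the
 * fresh readout and the stored snapshot to the totals, then saves the
 * fresh readout as the new snapshot.
 */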
70
71#define UPDATE_STAT64_NIG(s, t) \
72 do { \
73 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
74 diff.lo, new->s##_lo, old->s##_lo); \
75 ADD_64(estats->t##_hi, diff.hi, \
76 estats->t##_lo, diff.lo); \
77 } while (0)
78
79/* sum[hi:lo] += add */
80#define ADD_EXTEND_64(s_hi, s_lo, a) \
81 do { \
82 s_lo += a; \
83 s_hi += (s_lo < a) ? 1 : 0; \
84 } while (0)
85
86#define UPDATE_EXTEND_STAT(s) \
87 do { \
88 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
89 pstats->mac_stx[1].s##_lo, \
90 new->s); \
91 } while (0)
92
93#define UPDATE_EXTEND_TSTAT(s, t) \
94 do { \
95 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
96 old_tclient->s = tclient->s; \
97 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
98 } while (0)
99
100#define UPDATE_EXTEND_USTAT(s, t) \
101 do { \
102 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
103 old_uclient->s = uclient->s; \
104 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
105 } while (0)
106
107#define UPDATE_EXTEND_XSTAT(s, t) \
108 do { \
109 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
110 old_xclient->s = xclient->s; \
111 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
112 } while (0)
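/*
 * Note: the UPDATE_EXTEND_*STAT macros fold the 32-bit per-client
 * counters reported by the t/u/x storms into 64-bit driver counters.
 * 'diff' is computed with unsigned 32-bit arithmetic, so a single wrap
 * of the firmware counter between updates still yields the correct
 * delta, and ADD_EXTEND_64 carries it into the _hi word.
 */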
113
114/* minuend -= subtrahend */
115#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
116 do { \
117 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
118 } while (0)
119
120/* minuend[hi:lo] -= subtrahend */
121#define SUB_EXTEND_64(m_hi, m_lo, s) \
122 do { \
123 SUB_64(m_hi, 0, m_lo, s); \
124 } while (0)
125
126#define SUB_EXTEND_USTAT(s, t) \
127 do { \
128 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
129 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
130 } while (0)
131
132/*
133 * General service functions
134 */
135
136static inline long bnx2x_hilo(u32 *hiref)
137{
138 u32 lo = *(hiref + 1);
139#if (BITS_PER_LONG == 64)
140 u32 hi = *hiref;
141
142 return HILO_U64(hi, lo);
143#else
144 return lo;
145#endif
146}
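/*
 * Note: on 32-bit kernels only the low 32 bits are returned, since the
 * 'unsigned long' fields of struct net_device_stats cannot hold the
 * full value there. HILO_U64 is assumed to be the usual
 * (((u64)hi << 32) + lo) helper from bnx2x.h.
 */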
147
148/*
149 * Init service functions
150 */
151
152
153static void bnx2x_storm_stats_post(struct bnx2x *bp)
154{
155 if (!bp->stats_pending) {
156 struct eth_query_ramrod_data ramrod_data = {0};
157 int i, rc;
158
159 spin_lock_bh(&bp->stats_lock);
160
161 ramrod_data.drv_counter = bp->stats_counter++;
162 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
163 for_each_queue(bp, i)
164 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
165
166 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
167 ((u32 *)&ramrod_data)[1],
168 ((u32 *)&ramrod_data)[0], 0);
169 if (rc == 0) {
170 /* stats ramrod has its own slot on the spq */
171 bp->spq_left++;
172 bp->stats_pending = 1;
173 }
174
175 spin_unlock_bh(&bp->stats_lock);
176 }
177}
178
179static void bnx2x_hw_stats_post(struct bnx2x *bp)
180{
181 struct dmae_command *dmae = &bp->stats_dmae;
182 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
183
184 *stats_comp = DMAE_COMP_VAL;
185 if (CHIP_REV_IS_SLOW(bp))
186 return;
187
188 /* loader */
189 if (bp->executer_idx) {
190 int loader_idx = PMF_DMAE_C(bp);
191
192 memset(dmae, 0, sizeof(struct dmae_command));
193
194 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
195 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
196 DMAE_CMD_DST_RESET |
197#ifdef __BIG_ENDIAN
198 DMAE_CMD_ENDIANITY_B_DW_SWAP |
199#else
200 DMAE_CMD_ENDIANITY_DW_SWAP |
201#endif
202 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
203 DMAE_CMD_PORT_0) |
204 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
205 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
206 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
207 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
208 sizeof(struct dmae_command) *
209 (loader_idx + 1)) >> 2;
210 dmae->dst_addr_hi = 0;
211 dmae->len = sizeof(struct dmae_command) >> 2;
212 if (CHIP_IS_E1(bp))
213 dmae->len--;
214 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
215 dmae->comp_addr_hi = 0;
216 dmae->comp_val = 1;
217
218 *stats_comp = 0;
219 bnx2x_post_dmae(bp, dmae, loader_idx);
220
221 } else if (bp->func_stx) {
222 *stats_comp = 0;
223 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
224 }
225}
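/*
 * Note: stats_comp is pre-set to DMAE_COMP_VAL so that on emulation
 * (CHIP_REV_IS_SLOW) posting is skipped and bnx2x_stats_comp() returns
 * immediately. Otherwise, when commands have been staged
 * (executer_idx != 0), a single "loader" command kicks off the staged
 * chain, and only the last staged command writes DMAE_COMP_VAL back to
 * stats_comp when the whole transfer completes.
 */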
226
227static int bnx2x_stats_comp(struct bnx2x *bp)
228{
229 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
230 int cnt = 10;
231
232 might_sleep();
233 while (*stats_comp != DMAE_COMP_VAL) {
234 if (!cnt) {
235 BNX2X_ERR("timeout waiting for stats finished\n");
236 break;
237 }
238 cnt--;
239 msleep(1);
240 }
241 return 1;
242}
243
244/*
245 * Statistics service functions
246 */
247
248static void bnx2x_stats_pmf_update(struct bnx2x *bp)
249{
250 struct dmae_command *dmae;
251 u32 opcode;
252 int loader_idx = PMF_DMAE_C(bp);
253 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
254
255 /* sanity */
256 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
257 BNX2X_ERR("BUG!\n");
258 return;
259 }
260
261 bp->executer_idx = 0;
262
263 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
264 DMAE_CMD_C_ENABLE |
265 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
266#ifdef __BIG_ENDIAN
267 DMAE_CMD_ENDIANITY_B_DW_SWAP |
268#else
269 DMAE_CMD_ENDIANITY_DW_SWAP |
270#endif
271 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
272 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
273
274 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
275 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
276 dmae->src_addr_lo = bp->port.port_stx >> 2;
277 dmae->src_addr_hi = 0;
278 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
279 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
280 dmae->len = DMAE_LEN32_RD_MAX;
281 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
282 dmae->comp_addr_hi = 0;
283 dmae->comp_val = 1;
284
285 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
286 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
287 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
288 dmae->src_addr_hi = 0;
289 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
290 DMAE_LEN32_RD_MAX * 4);
291 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
292 DMAE_LEN32_RD_MAX * 4);
293 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
294 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
295 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
296 dmae->comp_val = DMAE_COMP_VAL;
297
298 *stats_comp = 0;
299 bnx2x_hw_stats_post(bp);
300 bnx2x_stats_comp(bp);
301}
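/*
 * Note: a single DMAE read is capped at DMAE_LEN32_RD_MAX dwords, which
 * is why the host_port_stats block at port_stx is fetched in two
 * chained commands above.
 */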
302
303static void bnx2x_port_stats_init(struct bnx2x *bp)
304{
305 struct dmae_command *dmae;
306 int port = BP_PORT(bp);
307 int vn = BP_E1HVN(bp);
308 u32 opcode;
309 int loader_idx = PMF_DMAE_C(bp);
310 u32 mac_addr;
311 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
312
313 /* sanity */
314 if (!bp->link_vars.link_up || !bp->port.pmf) {
315 BNX2X_ERR("BUG!\n");
316 return;
317 }
318
319 bp->executer_idx = 0;
320
321 /* MCP */
322 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
323 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
324 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
325#ifdef __BIG_ENDIAN
326 DMAE_CMD_ENDIANITY_B_DW_SWAP |
327#else
328 DMAE_CMD_ENDIANITY_DW_SWAP |
329#endif
330 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
331 (vn << DMAE_CMD_E1HVN_SHIFT));
332
333 if (bp->port.port_stx) {
334
335 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
336 dmae->opcode = opcode;
337 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
338 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
339 dmae->dst_addr_lo = bp->port.port_stx >> 2;
340 dmae->dst_addr_hi = 0;
341 dmae->len = sizeof(struct host_port_stats) >> 2;
342 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
343 dmae->comp_addr_hi = 0;
344 dmae->comp_val = 1;
345 }
346
347 if (bp->func_stx) {
348
349 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
350 dmae->opcode = opcode;
351 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
352 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
353 dmae->dst_addr_lo = bp->func_stx >> 2;
354 dmae->dst_addr_hi = 0;
355 dmae->len = sizeof(struct host_func_stats) >> 2;
356 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
357 dmae->comp_addr_hi = 0;
358 dmae->comp_val = 1;
359 }
360
361 /* MAC */
362 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
363 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
364 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
365#ifdef __BIG_ENDIAN
366 DMAE_CMD_ENDIANITY_B_DW_SWAP |
367#else
368 DMAE_CMD_ENDIANITY_DW_SWAP |
369#endif
370 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
371 (vn << DMAE_CMD_E1HVN_SHIFT));
372
373 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
374
375 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
376 NIG_REG_INGRESS_BMAC0_MEM);
377
378 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
379 BIGMAC_REGISTER_TX_STAT_GTBYT */
380 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
381 dmae->opcode = opcode;
382 dmae->src_addr_lo = (mac_addr +
383 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
384 dmae->src_addr_hi = 0;
385 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
386 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
387 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
388 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
389 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
390 dmae->comp_addr_hi = 0;
391 dmae->comp_val = 1;
392
393 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
394 BIGMAC_REGISTER_RX_STAT_GRIPJ */
395 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
396 dmae->opcode = opcode;
397 dmae->src_addr_lo = (mac_addr +
398 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
399 dmae->src_addr_hi = 0;
400 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
401 offsetof(struct bmac_stats, rx_stat_gr64_lo));
402 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
403 offsetof(struct bmac_stats, rx_stat_gr64_lo));
404 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
405 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
406 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
407 dmae->comp_addr_hi = 0;
408 dmae->comp_val = 1;
409
410 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
411
412 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
413
414 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
415 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
416 dmae->opcode = opcode;
417 dmae->src_addr_lo = (mac_addr +
418 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
419 dmae->src_addr_hi = 0;
420 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
421 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
422 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
423 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
424 dmae->comp_addr_hi = 0;
425 dmae->comp_val = 1;
426
427 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
428 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
429 dmae->opcode = opcode;
430 dmae->src_addr_lo = (mac_addr +
431 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
432 dmae->src_addr_hi = 0;
433 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
434 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
435 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
436 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
437 dmae->len = 1;
438 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
439 dmae->comp_addr_hi = 0;
440 dmae->comp_val = 1;
441
442 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
443 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
444 dmae->opcode = opcode;
445 dmae->src_addr_lo = (mac_addr +
446 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
447 dmae->src_addr_hi = 0;
448 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
449 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
450 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
451 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
452 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
453 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
454 dmae->comp_addr_hi = 0;
455 dmae->comp_val = 1;
456 }
457
458 /* NIG */
459 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
460 dmae->opcode = opcode;
461 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
462 NIG_REG_STAT0_BRB_DISCARD) >> 2;
463 dmae->src_addr_hi = 0;
464 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
465 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
466 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
467 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
468 dmae->comp_addr_hi = 0;
469 dmae->comp_val = 1;
470
471 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
472 dmae->opcode = opcode;
473 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
474 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
475 dmae->src_addr_hi = 0;
476 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
477 offsetof(struct nig_stats, egress_mac_pkt0_lo));
478 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
479 offsetof(struct nig_stats, egress_mac_pkt0_lo));
480 dmae->len = (2*sizeof(u32)) >> 2;
481 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
482 dmae->comp_addr_hi = 0;
483 dmae->comp_val = 1;
484
485 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
486 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
487 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
488 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
489#ifdef __BIG_ENDIAN
490 DMAE_CMD_ENDIANITY_B_DW_SWAP |
491#else
492 DMAE_CMD_ENDIANITY_DW_SWAP |
493#endif
494 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
495 (vn << DMAE_CMD_E1HVN_SHIFT));
496 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
497 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
498 dmae->src_addr_hi = 0;
499 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
500 offsetof(struct nig_stats, egress_mac_pkt1_lo));
501 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
502 offsetof(struct nig_stats, egress_mac_pkt1_lo));
503 dmae->len = (2*sizeof(u32)) >> 2;
504 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
505 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
506 dmae->comp_val = DMAE_COMP_VAL;
507
508 *stats_comp = 0;
509}
510
511static void bnx2x_func_stats_init(struct bnx2x *bp)
512{
513 struct dmae_command *dmae = &bp->stats_dmae;
514 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
515
516 /* sanity */
517 if (!bp->func_stx) {
518 BNX2X_ERR("BUG!\n");
519 return;
520 }
521
522 bp->executer_idx = 0;
523 memset(dmae, 0, sizeof(struct dmae_command));
524
525 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
526 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
527 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
528#ifdef __BIG_ENDIAN
529 DMAE_CMD_ENDIANITY_B_DW_SWAP |
530#else
531 DMAE_CMD_ENDIANITY_DW_SWAP |
532#endif
533 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
534 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
535 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
536 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
537 dmae->dst_addr_lo = bp->func_stx >> 2;
538 dmae->dst_addr_hi = 0;
539 dmae->len = sizeof(struct host_func_stats) >> 2;
540 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
541 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
542 dmae->comp_val = DMAE_COMP_VAL;
543
544 *stats_comp = 0;
545}
546
547static void bnx2x_stats_start(struct bnx2x *bp)
548{
549 if (bp->port.pmf)
550 bnx2x_port_stats_init(bp);
551
552 else if (bp->func_stx)
553 bnx2x_func_stats_init(bp);
554
555 bnx2x_hw_stats_post(bp);
556 bnx2x_storm_stats_post(bp);
557}
558
559static void bnx2x_stats_pmf_start(struct bnx2x *bp)
560{
561 bnx2x_stats_comp(bp);
562 bnx2x_stats_pmf_update(bp);
563 bnx2x_stats_start(bp);
564}
565
566static void bnx2x_stats_restart(struct bnx2x *bp)
567{
568 bnx2x_stats_comp(bp);
569 bnx2x_stats_start(bp);
570}
571
572static void bnx2x_bmac_stats_update(struct bnx2x *bp)
573{
574 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
575 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
576 struct bnx2x_eth_stats *estats = &bp->eth_stats;
577 struct {
578 u32 lo;
579 u32 hi;
580 } diff;
581
582 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
583 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
584 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
585 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
586 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
587 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
588 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
589 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
590 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
591 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
592 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
593 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
594 UPDATE_STAT64(tx_stat_gt127,
595 tx_stat_etherstatspkts65octetsto127octets);
596 UPDATE_STAT64(tx_stat_gt255,
597 tx_stat_etherstatspkts128octetsto255octets);
598 UPDATE_STAT64(tx_stat_gt511,
599 tx_stat_etherstatspkts256octetsto511octets);
600 UPDATE_STAT64(tx_stat_gt1023,
601 tx_stat_etherstatspkts512octetsto1023octets);
602 UPDATE_STAT64(tx_stat_gt1518,
603 tx_stat_etherstatspkts1024octetsto1522octets);
604 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
605 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
606 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
607 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
608 UPDATE_STAT64(tx_stat_gterr,
609 tx_stat_dot3statsinternalmactransmiterrors);
610 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
611
612 estats->pause_frames_received_hi =
613 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
614 estats->pause_frames_received_lo =
615 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
616
617 estats->pause_frames_sent_hi =
618 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
619 estats->pause_frames_sent_lo =
620 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
621}
622
623static void bnx2x_emac_stats_update(struct bnx2x *bp)
624{
625 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
626 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
627 struct bnx2x_eth_stats *estats = &bp->eth_stats;
628
629 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
630 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
631 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
632 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
633 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
634 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
635 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
636 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
637 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
638 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
639 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
640 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
641 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
642 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
643 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
644 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
645 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
646 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
647 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
648 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
649 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
650 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
651 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
652 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
653 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
654 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
655 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
656 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
657 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
658 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
659 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
660
661 estats->pause_frames_received_hi =
662 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
663 estats->pause_frames_received_lo =
664 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
665 ADD_64(estats->pause_frames_received_hi,
666 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
667 estats->pause_frames_received_lo,
668 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
669
670 estats->pause_frames_sent_hi =
671 pstats->mac_stx[1].tx_stat_outxonsent_hi;
672 estats->pause_frames_sent_lo =
673 pstats->mac_stx[1].tx_stat_outxonsent_lo;
674 ADD_64(estats->pause_frames_sent_hi,
675 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
676 estats->pause_frames_sent_lo,
677 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
678}
679
680static int bnx2x_hw_stats_update(struct bnx2x *bp)
681{
682 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
683 struct nig_stats *old = &(bp->port.old_nig_stats);
684 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
685 struct bnx2x_eth_stats *estats = &bp->eth_stats;
686 struct {
687 u32 lo;
688 u32 hi;
689 } diff;
690
691 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
692 bnx2x_bmac_stats_update(bp);
693
694 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
695 bnx2x_emac_stats_update(bp);
696
697 else { /* unreached */
698 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
699 return -1;
700 }
701
702 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
703 new->brb_discard - old->brb_discard);
704 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
705 new->brb_truncate - old->brb_truncate);
706
707 UPDATE_STAT64_NIG(egress_mac_pkt0,
708 etherstatspkts1024octetsto1522octets);
709 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
710
711 memcpy(old, new, sizeof(struct nig_stats));
712
713 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
714 sizeof(struct mac_stx));
715 estats->brb_drop_hi = pstats->brb_drop_hi;
716 estats->brb_drop_lo = pstats->brb_drop_lo;
717
718 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
719
720 if (!BP_NOMCP(bp)) {
721 u32 nig_timer_max =
722 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
723 if (nig_timer_max != estats->nig_timer_max) {
724 estats->nig_timer_max = nig_timer_max;
725 BNX2X_ERR("NIG timer max (%u)\n",
726 estats->nig_timer_max);
727 }
728 }
729
730 return 0;
731}
732
733static int bnx2x_storm_stats_update(struct bnx2x *bp)
734{
735 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
736 struct tstorm_per_port_stats *tport =
737 &stats->tstorm_common.port_statistics;
738 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
739 struct bnx2x_eth_stats *estats = &bp->eth_stats;
740 int i;
741 u16 cur_stats_counter;
742
743 /* Make sure we use the value of the counter
744 * used for sending the last stats ramrod.
745 */
746 spin_lock_bh(&bp->stats_lock);
747 cur_stats_counter = bp->stats_counter - 1;
748 spin_unlock_bh(&bp->stats_lock);
749
750 memcpy(&(fstats->total_bytes_received_hi),
751 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
752 sizeof(struct host_func_stats) - 2*sizeof(u32));
753 estats->error_bytes_received_hi = 0;
754 estats->error_bytes_received_lo = 0;
755 estats->etherstatsoverrsizepkts_hi = 0;
756 estats->etherstatsoverrsizepkts_lo = 0;
757 estats->no_buff_discard_hi = 0;
758 estats->no_buff_discard_lo = 0;
759
760 for_each_queue(bp, i) {
761 struct bnx2x_fastpath *fp = &bp->fp[i];
762 int cl_id = fp->cl_id;
763 struct tstorm_per_client_stats *tclient =
764 &stats->tstorm_common.client_statistics[cl_id];
765 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
766 struct ustorm_per_client_stats *uclient =
767 &stats->ustorm_common.client_statistics[cl_id];
768 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
769 struct xstorm_per_client_stats *xclient =
770 &stats->xstorm_common.client_statistics[cl_id];
771 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
772 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
773 u32 diff;
774
775 /* are storm stats valid? */
776 if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) {
777 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
778 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
779 i, xclient->stats_counter, cur_stats_counter + 1);
780 return -1;
781 }
782 if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) {
783 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
784 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
785 i, tclient->stats_counter, cur_stats_counter + 1);
786 return -2;
787 }
788 if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) {
789 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
790 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
791 i, uclient->stats_counter, cur_stats_counter + 1);
792 return -4;
793 }
794
795 qstats->total_bytes_received_hi =
796 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
797 qstats->total_bytes_received_lo =
798 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
799
800 ADD_64(qstats->total_bytes_received_hi,
801 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
802 qstats->total_bytes_received_lo,
803 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
804
805 ADD_64(qstats->total_bytes_received_hi,
806 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
807 qstats->total_bytes_received_lo,
808 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
809
810 SUB_64(qstats->total_bytes_received_hi,
811 le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
812 qstats->total_bytes_received_lo,
813 le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
814
815 SUB_64(qstats->total_bytes_received_hi,
816 le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
817 qstats->total_bytes_received_lo,
818 le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
819
820 SUB_64(qstats->total_bytes_received_hi,
821 le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
822 qstats->total_bytes_received_lo,
823 le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
824
825 qstats->valid_bytes_received_hi =
826 qstats->total_bytes_received_hi;
827 qstats->valid_bytes_received_lo =
828 qstats->total_bytes_received_lo;
829
830 qstats->error_bytes_received_hi =
831 le32_to_cpu(tclient->rcv_error_bytes.hi);
832 qstats->error_bytes_received_lo =
833 le32_to_cpu(tclient->rcv_error_bytes.lo);
834
835 ADD_64(qstats->total_bytes_received_hi,
836 qstats->error_bytes_received_hi,
837 qstats->total_bytes_received_lo,
838 qstats->error_bytes_received_lo);
839
840 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
841 total_unicast_packets_received);
842 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
843 total_multicast_packets_received);
844 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
845 total_broadcast_packets_received);
846 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
847 etherstatsoverrsizepkts);
848 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
849
850 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
851 total_unicast_packets_received);
852 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
853 total_multicast_packets_received);
854 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
855 total_broadcast_packets_received);
856 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
857 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
858 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
859
860 qstats->total_bytes_transmitted_hi =
861 le32_to_cpu(xclient->unicast_bytes_sent.hi);
862 qstats->total_bytes_transmitted_lo =
863 le32_to_cpu(xclient->unicast_bytes_sent.lo);
864
865 ADD_64(qstats->total_bytes_transmitted_hi,
866 le32_to_cpu(xclient->multicast_bytes_sent.hi),
867 qstats->total_bytes_transmitted_lo,
868 le32_to_cpu(xclient->multicast_bytes_sent.lo));
869
870 ADD_64(qstats->total_bytes_transmitted_hi,
871 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
872 qstats->total_bytes_transmitted_lo,
873 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
874
875 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
876 total_unicast_packets_transmitted);
877 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
878 total_multicast_packets_transmitted);
879 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
880 total_broadcast_packets_transmitted);
881
882 old_tclient->checksum_discard = tclient->checksum_discard;
883 old_tclient->ttl0_discard = tclient->ttl0_discard;
884
885 ADD_64(fstats->total_bytes_received_hi,
886 qstats->total_bytes_received_hi,
887 fstats->total_bytes_received_lo,
888 qstats->total_bytes_received_lo);
889 ADD_64(fstats->total_bytes_transmitted_hi,
890 qstats->total_bytes_transmitted_hi,
891 fstats->total_bytes_transmitted_lo,
892 qstats->total_bytes_transmitted_lo);
893 ADD_64(fstats->total_unicast_packets_received_hi,
894 qstats->total_unicast_packets_received_hi,
895 fstats->total_unicast_packets_received_lo,
896 qstats->total_unicast_packets_received_lo);
897 ADD_64(fstats->total_multicast_packets_received_hi,
898 qstats->total_multicast_packets_received_hi,
899 fstats->total_multicast_packets_received_lo,
900 qstats->total_multicast_packets_received_lo);
901 ADD_64(fstats->total_broadcast_packets_received_hi,
902 qstats->total_broadcast_packets_received_hi,
903 fstats->total_broadcast_packets_received_lo,
904 qstats->total_broadcast_packets_received_lo);
905 ADD_64(fstats->total_unicast_packets_transmitted_hi,
906 qstats->total_unicast_packets_transmitted_hi,
907 fstats->total_unicast_packets_transmitted_lo,
908 qstats->total_unicast_packets_transmitted_lo);
909 ADD_64(fstats->total_multicast_packets_transmitted_hi,
910 qstats->total_multicast_packets_transmitted_hi,
911 fstats->total_multicast_packets_transmitted_lo,
912 qstats->total_multicast_packets_transmitted_lo);
913 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
914 qstats->total_broadcast_packets_transmitted_hi,
915 fstats->total_broadcast_packets_transmitted_lo,
916 qstats->total_broadcast_packets_transmitted_lo);
917 ADD_64(fstats->valid_bytes_received_hi,
918 qstats->valid_bytes_received_hi,
919 fstats->valid_bytes_received_lo,
920 qstats->valid_bytes_received_lo);
921
922 ADD_64(estats->error_bytes_received_hi,
923 qstats->error_bytes_received_hi,
924 estats->error_bytes_received_lo,
925 qstats->error_bytes_received_lo);
926 ADD_64(estats->etherstatsoverrsizepkts_hi,
927 qstats->etherstatsoverrsizepkts_hi,
928 estats->etherstatsoverrsizepkts_lo,
929 qstats->etherstatsoverrsizepkts_lo);
930 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
931 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
932 }
933
934 ADD_64(fstats->total_bytes_received_hi,
935 estats->rx_stat_ifhcinbadoctets_hi,
936 fstats->total_bytes_received_lo,
937 estats->rx_stat_ifhcinbadoctets_lo);
938
939 memcpy(estats, &(fstats->total_bytes_received_hi),
940 sizeof(struct host_func_stats) - 2*sizeof(u32));
941
942 ADD_64(estats->etherstatsoverrsizepkts_hi,
943 estats->rx_stat_dot3statsframestoolong_hi,
944 estats->etherstatsoverrsizepkts_lo,
945 estats->rx_stat_dot3statsframestoolong_lo);
946 ADD_64(estats->error_bytes_received_hi,
947 estats->rx_stat_ifhcinbadoctets_hi,
948 estats->error_bytes_received_lo,
949 estats->rx_stat_ifhcinbadoctets_lo);
950
951 if (bp->port.pmf) {
952 estats->mac_filter_discard =
953 le32_to_cpu(tport->mac_filter_discard);
954 estats->xxoverflow_discard =
955 le32_to_cpu(tport->xxoverflow_discard);
956 estats->brb_truncate_discard =
957 le32_to_cpu(tport->brb_truncate_discard);
958 estats->mac_discard = le32_to_cpu(tport->mac_discard);
959 }
960
961 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
962
963 bp->stats_pending = 0;
964
965 return 0;
966}
967
968static void bnx2x_net_stats_update(struct bnx2x *bp)
969{
970 struct bnx2x_eth_stats *estats = &bp->eth_stats;
971 struct net_device_stats *nstats = &bp->dev->stats;
972 int i;
973
974 nstats->rx_packets =
975 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
976 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
977 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
978
979 nstats->tx_packets =
980 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
981 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
982 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
983
984 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
985
986 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
987
988 nstats->rx_dropped = estats->mac_discard;
989 for_each_queue(bp, i)
990 nstats->rx_dropped +=
991 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
992
993 nstats->tx_dropped = 0;
994
995 nstats->multicast =
996 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
997
998 nstats->collisions =
999 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
1000
1001 nstats->rx_length_errors =
1002 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
1003 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
1004 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
1005 bnx2x_hilo(&estats->brb_truncate_hi);
1006 nstats->rx_crc_errors =
1007 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
1008 nstats->rx_frame_errors =
1009 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
1010 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
1011 nstats->rx_missed_errors = estats->xxoverflow_discard;
1012
1013 nstats->rx_errors = nstats->rx_length_errors +
1014 nstats->rx_over_errors +
1015 nstats->rx_crc_errors +
1016 nstats->rx_frame_errors +
1017 nstats->rx_fifo_errors +
1018 nstats->rx_missed_errors;
1019
1020 nstats->tx_aborted_errors =
1021 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
1022 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
1023 nstats->tx_carrier_errors =
1024 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
1025 nstats->tx_fifo_errors = 0;
1026 nstats->tx_heartbeat_errors = 0;
1027 nstats->tx_window_errors = 0;
1028
1029 nstats->tx_errors = nstats->tx_aborted_errors +
1030 nstats->tx_carrier_errors +
1031 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
1032}
1033
1034static void bnx2x_drv_stats_update(struct bnx2x *bp)
1035{
1036 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1037 int i;
1038
1039 estats->driver_xoff = 0;
1040 estats->rx_err_discard_pkt = 0;
1041 estats->rx_skb_alloc_failed = 0;
1042 estats->hw_csum_err = 0;
1043 for_each_queue(bp, i) {
1044 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
1045
1046 estats->driver_xoff += qstats->driver_xoff;
1047 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
1048 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
1049 estats->hw_csum_err += qstats->hw_csum_err;
1050 }
1051}
1052
1053static void bnx2x_stats_update(struct bnx2x *bp)
1054{
1055 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1056
1057 if (*stats_comp != DMAE_COMP_VAL)
1058 return;
1059
1060 if (bp->port.pmf)
1061 bnx2x_hw_stats_update(bp);
1062
1063 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
1064 BNX2X_ERR("storm stats were not updated for 3 times\n");
1065 bnx2x_panic();
1066 return;
1067 }
1068
1069 bnx2x_net_stats_update(bp);
1070 bnx2x_drv_stats_update(bp);
1071
1072 if (netif_msg_timer(bp)) {
1073 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1074 int i;
1075
1076 printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
1077 bp->dev->name,
1078 estats->brb_drop_lo, estats->brb_truncate_lo);
1079
1080 for_each_queue(bp, i) {
1081 struct bnx2x_fastpath *fp = &bp->fp[i];
1082 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1083
1084 printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
1085 " rx pkt(%lu) rx calls(%lu %lu)\n",
1086 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
1087 fp->rx_comp_cons),
1088 le16_to_cpu(*fp->rx_cons_sb),
1089 bnx2x_hilo(&qstats->
1090 total_unicast_packets_received_hi),
1091 fp->rx_calls, fp->rx_pkt);
1092 }
1093
1094 for_each_queue(bp, i) {
1095 struct bnx2x_fastpath *fp = &bp->fp[i];
1096 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1097 struct netdev_queue *txq =
1098 netdev_get_tx_queue(bp->dev, i);
1099
1100 printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
1101 " tx pkt(%lu) tx calls (%lu)"
1102 " %s (Xoff events %u)\n",
1103 fp->name, bnx2x_tx_avail(fp),
1104 le16_to_cpu(*fp->tx_cons_sb),
1105 bnx2x_hilo(&qstats->
1106 total_unicast_packets_transmitted_hi),
1107 fp->tx_pkt,
1108 (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
1109 qstats->driver_xoff);
1110 }
1111 }
1112
1113 bnx2x_hw_stats_post(bp);
1114 bnx2x_storm_stats_post(bp);
1115}
1116
1117static void bnx2x_port_stats_stop(struct bnx2x *bp)
1118{
1119 struct dmae_command *dmae;
1120 u32 opcode;
1121 int loader_idx = PMF_DMAE_C(bp);
1122 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1123
1124 bp->executer_idx = 0;
1125
1126 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
1127 DMAE_CMD_C_ENABLE |
1128 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
1129#ifdef __BIG_ENDIAN
1130 DMAE_CMD_ENDIANITY_B_DW_SWAP |
1131#else
1132 DMAE_CMD_ENDIANITY_DW_SWAP |
1133#endif
1134 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
1135 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
1136
1137 if (bp->port.port_stx) {
1138
1139 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1140 if (bp->func_stx)
1141 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
1142 else
1143 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
1144 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1145 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1146 dmae->dst_addr_lo = bp->port.port_stx >> 2;
1147 dmae->dst_addr_hi = 0;
1148 dmae->len = sizeof(struct host_port_stats) >> 2;
1149 if (bp->func_stx) {
1150 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
1151 dmae->comp_addr_hi = 0;
1152 dmae->comp_val = 1;
1153 } else {
1154 dmae->comp_addr_lo =
1155 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1156 dmae->comp_addr_hi =
1157 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1158 dmae->comp_val = DMAE_COMP_VAL;
1159
1160 *stats_comp = 0;
1161 }
1162 }
1163
1164 if (bp->func_stx) {
1165
1166 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1167 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
1168 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
1169 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
1170 dmae->dst_addr_lo = bp->func_stx >> 2;
1171 dmae->dst_addr_hi = 0;
1172 dmae->len = sizeof(struct host_func_stats) >> 2;
1173 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1174 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1175 dmae->comp_val = DMAE_COMP_VAL;
1176
1177 *stats_comp = 0;
1178 }
1179}
1180
1181static void bnx2x_stats_stop(struct bnx2x *bp)
1182{
1183 int update = 0;
1184
1185 bnx2x_stats_comp(bp);
1186
1187 if (bp->port.pmf)
1188 update = (bnx2x_hw_stats_update(bp) == 0);
1189
1190 update |= (bnx2x_storm_stats_update(bp) == 0);
1191
1192 if (update) {
1193 bnx2x_net_stats_update(bp);
1194
1195 if (bp->port.pmf)
1196 bnx2x_port_stats_stop(bp);
1197
1198 bnx2x_hw_stats_post(bp);
1199 bnx2x_stats_comp(bp);
1200 }
1201}
1202
1203static void bnx2x_stats_do_nothing(struct bnx2x *bp)
1204{
1205}
1206
1207static const struct {
1208 void (*action)(struct bnx2x *bp);
1209 enum bnx2x_stats_state next_state;
1210} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
1211/* state event */
1212{
1213/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
1214/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
1215/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
1216/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
1217},
1218{
1219/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
1220/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
1221/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
1222/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
1223}
1224};
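/*
 * Note: the table is indexed as bnx2x_stats_stm[current_state][event].
 * For example, an UPDATE event while ENABLED runs bnx2x_stats_update()
 * and stays in STATS_STATE_ENABLED, whereas a STOP event runs
 * bnx2x_stats_stop() and moves to STATS_STATE_DISABLED.
 */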
1225
1226void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1227{
1228 enum bnx2x_stats_state state;
1229
1230 if (unlikely(bp->panic))
1231 return;
1232
1233 /* Protect a state change flow */
1234 spin_lock_bh(&bp->stats_lock);
1235 state = bp->stats_state;
1236 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1237 spin_unlock_bh(&bp->stats_lock);
1238
1239 bnx2x_stats_stm[state][event].action(bp);
1240
1241 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1242 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
1243 state, event, bp->stats_state);
1244}
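/*
 * Note: bnx2x_stats_handle() is the single entry point into the state
 * machine above; the rest of the driver reports events (the
 * PMF/LINK_UP/UPDATE/STOP columns of the table) rather than calling
 * the worker functions directly. STATS_EVENT_UPDATE, for instance, is
 * presumably raised from the driver's periodic timer.
 */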
1245
1246static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1247{
1248 struct dmae_command *dmae;
1249 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1250
1251 /* sanity */
1252 if (!bp->port.pmf || !bp->port.port_stx) {
1253 BNX2X_ERR("BUG!\n");
1254 return;
1255 }
1256
1257 bp->executer_idx = 0;
1258
1259 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1260 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
1261 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
1262 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
1263#ifdef __BIG_ENDIAN
1264 DMAE_CMD_ENDIANITY_B_DW_SWAP |
1265#else
1266 DMAE_CMD_ENDIANITY_DW_SWAP |
1267#endif
1268 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
1269 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
1270 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1271 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1272 dmae->dst_addr_lo = bp->port.port_stx >> 2;
1273 dmae->dst_addr_hi = 0;
1274 dmae->len = sizeof(struct host_port_stats) >> 2;
1275 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1276 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1277 dmae->comp_val = DMAE_COMP_VAL;
1278
1279 *stats_comp = 0;
1280 bnx2x_hw_stats_post(bp);
1281 bnx2x_stats_comp(bp);
1282}
1283
1284static void bnx2x_func_stats_base_init(struct bnx2x *bp)
1285{
1286 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
1287 int port = BP_PORT(bp);
1288 int func;
1289 u32 func_stx;
1290
1291 /* sanity */
1292 if (!bp->port.pmf || !bp->func_stx) {
1293 BNX2X_ERR("BUG!\n");
1294 return;
1295 }
1296
1297 /* save our func_stx */
1298 func_stx = bp->func_stx;
1299
1300 for (vn = VN_0; vn < vn_max; vn++) {
1301 func = 2*vn + port;
1302
1303 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
1304 bnx2x_func_stats_init(bp);
1305 bnx2x_hw_stats_post(bp);
1306 bnx2x_stats_comp(bp);
1307 }
1308
1309 /* restore our func_stx */
1310 bp->func_stx = func_stx;
1311}
1312
1313static void bnx2x_func_stats_base_update(struct bnx2x *bp)
1314{
1315 struct dmae_command *dmae = &bp->stats_dmae;
1316 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1317
1318 /* sanity */
1319 if (!bp->func_stx) {
1320 BNX2X_ERR("BUG!\n");
1321 return;
1322 }
1323
1324 bp->executer_idx = 0;
1325 memset(dmae, 0, sizeof(struct dmae_command));
1326
1327 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
1328 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
1329 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
1330#ifdef __BIG_ENDIAN
1331 DMAE_CMD_ENDIANITY_B_DW_SWAP |
1332#else
1333 DMAE_CMD_ENDIANITY_DW_SWAP |
1334#endif
1335 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
1336 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
1337 dmae->src_addr_lo = bp->func_stx >> 2;
1338 dmae->src_addr_hi = 0;
1339 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
1340 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
1341 dmae->len = sizeof(struct host_func_stats) >> 2;
1342 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1343 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1344 dmae->comp_val = DMAE_COMP_VAL;
1345
1346 *stats_comp = 0;
1347 bnx2x_hw_stats_post(bp);
1348 bnx2x_stats_comp(bp);
1349}
1350
1351void bnx2x_stats_init(struct bnx2x *bp)
1352{
1353 int port = BP_PORT(bp);
1354 int func = BP_FUNC(bp);
1355 int i;
1356
1357 bp->stats_pending = 0;
1358 bp->executer_idx = 0;
1359 bp->stats_counter = 0;
1360
1361 /* port and func stats for management */
1362 if (!BP_NOMCP(bp)) {
1363 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
1364 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
1365
1366 } else {
1367 bp->port.port_stx = 0;
1368 bp->func_stx = 0;
1369 }
1370 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
1371 bp->port.port_stx, bp->func_stx);
1372
1373 /* port stats */
1374 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
1375 bp->port.old_nig_stats.brb_discard =
1376 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
1377 bp->port.old_nig_stats.brb_truncate =
1378 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
1379 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
1380 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
1381 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
1382 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1383
1384 /* function stats */
1385 for_each_queue(bp, i) {
1386 struct bnx2x_fastpath *fp = &bp->fp[i];
1387
1388 memset(&fp->old_tclient, 0,
1389 sizeof(struct tstorm_per_client_stats));
1390 memset(&fp->old_uclient, 0,
1391 sizeof(struct ustorm_per_client_stats));
1392 memset(&fp->old_xclient, 0,
1393 sizeof(struct xstorm_per_client_stats));
1394 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
1395 }
1396
1397 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
1398 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
1399
1400 bp->stats_state = STATS_STATE_DISABLED;
1401
1402 if (bp->port.pmf) {
1403 if (bp->port.port_stx)
1404 bnx2x_port_stats_base_init(bp);
1405
1406 if (bp->func_stx)
1407 bnx2x_func_stats_base_init(bp);
1408
1409 } else if (bp->func_stx)
1410 bnx2x_func_stats_base_update(bp);
1411}
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
new file mode 100644
index 00000000000..38a4e908f4f
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -0,0 +1,239 @@
1/* bnx2x_stats.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 */
13
14#ifndef BNX2X_STATS_H
15#define BNX2X_STATS_H
16
17#include <linux/types.h>
18
19struct bnx2x_eth_q_stats {
20 u32 total_bytes_received_hi;
21 u32 total_bytes_received_lo;
22 u32 total_bytes_transmitted_hi;
23 u32 total_bytes_transmitted_lo;
24 u32 total_unicast_packets_received_hi;
25 u32 total_unicast_packets_received_lo;
26 u32 total_multicast_packets_received_hi;
27 u32 total_multicast_packets_received_lo;
28 u32 total_broadcast_packets_received_hi;
29 u32 total_broadcast_packets_received_lo;
30 u32 total_unicast_packets_transmitted_hi;
31 u32 total_unicast_packets_transmitted_lo;
32 u32 total_multicast_packets_transmitted_hi;
33 u32 total_multicast_packets_transmitted_lo;
34 u32 total_broadcast_packets_transmitted_hi;
35 u32 total_broadcast_packets_transmitted_lo;
36 u32 valid_bytes_received_hi;
37 u32 valid_bytes_received_lo;
38
39 u32 error_bytes_received_hi;
40 u32 error_bytes_received_lo;
41 u32 etherstatsoverrsizepkts_hi;
42 u32 etherstatsoverrsizepkts_lo;
43 u32 no_buff_discard_hi;
44 u32 no_buff_discard_lo;
45
46 u32 driver_xoff;
47 u32 rx_err_discard_pkt;
48 u32 rx_skb_alloc_failed;
49 u32 hw_csum_err;
50};
51
52#define BNX2X_NUM_Q_STATS 13
53#define Q_STATS_OFFSET32(stat_name) \
54 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
55
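/* Each 64-bit counter in these structures is stored as a <name>_hi/<name>_lo
 * pair of u32s, following the storm firmware layout.  A minimal sketch of
 * folding such a pair back into one 64-bit value (the macro name here is
 * illustrative; the driver's own helper may differ):
 */
#define HILO_U64_EXAMPLE(hi, lo)	((((u64)(hi)) << 32) + (lo))
/* e.g. HILO_U64_EXAMPLE(qstats->total_bytes_received_hi,
 *			 qstats->total_bytes_received_lo) */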
56struct nig_stats {
57 u32 brb_discard;
58 u32 brb_packet;
59 u32 brb_truncate;
60 u32 flow_ctrl_discard;
61 u32 flow_ctrl_octets;
62 u32 flow_ctrl_packet;
63 u32 mng_discard;
64 u32 mng_octet_inp;
65 u32 mng_octet_out;
66 u32 mng_packet_inp;
67 u32 mng_packet_out;
68 u32 pbf_octets;
69 u32 pbf_packet;
70 u32 safc_inp;
71 u32 egress_mac_pkt0_lo;
72 u32 egress_mac_pkt0_hi;
73 u32 egress_mac_pkt1_lo;
74 u32 egress_mac_pkt1_hi;
75};
76
77
78enum bnx2x_stats_event {
79 STATS_EVENT_PMF = 0,
80 STATS_EVENT_LINK_UP,
81 STATS_EVENT_UPDATE,
82 STATS_EVENT_STOP,
83 STATS_EVENT_MAX
84};
85
86enum bnx2x_stats_state {
87 STATS_STATE_DISABLED = 0,
88 STATS_STATE_ENABLED,
89 STATS_STATE_MAX
90};
91
92struct bnx2x_eth_stats {
93 u32 total_bytes_received_hi;
94 u32 total_bytes_received_lo;
95 u32 total_bytes_transmitted_hi;
96 u32 total_bytes_transmitted_lo;
97 u32 total_unicast_packets_received_hi;
98 u32 total_unicast_packets_received_lo;
99 u32 total_multicast_packets_received_hi;
100 u32 total_multicast_packets_received_lo;
101 u32 total_broadcast_packets_received_hi;
102 u32 total_broadcast_packets_received_lo;
103 u32 total_unicast_packets_transmitted_hi;
104 u32 total_unicast_packets_transmitted_lo;
105 u32 total_multicast_packets_transmitted_hi;
106 u32 total_multicast_packets_transmitted_lo;
107 u32 total_broadcast_packets_transmitted_hi;
108 u32 total_broadcast_packets_transmitted_lo;
109 u32 valid_bytes_received_hi;
110 u32 valid_bytes_received_lo;
111
112 u32 error_bytes_received_hi;
113 u32 error_bytes_received_lo;
114 u32 etherstatsoverrsizepkts_hi;
115 u32 etherstatsoverrsizepkts_lo;
116 u32 no_buff_discard_hi;
117 u32 no_buff_discard_lo;
118
119 u32 rx_stat_ifhcinbadoctets_hi;
120 u32 rx_stat_ifhcinbadoctets_lo;
121 u32 tx_stat_ifhcoutbadoctets_hi;
122 u32 tx_stat_ifhcoutbadoctets_lo;
123 u32 rx_stat_dot3statsfcserrors_hi;
124 u32 rx_stat_dot3statsfcserrors_lo;
125 u32 rx_stat_dot3statsalignmenterrors_hi;
126 u32 rx_stat_dot3statsalignmenterrors_lo;
127 u32 rx_stat_dot3statscarriersenseerrors_hi;
128 u32 rx_stat_dot3statscarriersenseerrors_lo;
129 u32 rx_stat_falsecarriererrors_hi;
130 u32 rx_stat_falsecarriererrors_lo;
131 u32 rx_stat_etherstatsundersizepkts_hi;
132 u32 rx_stat_etherstatsundersizepkts_lo;
133 u32 rx_stat_dot3statsframestoolong_hi;
134 u32 rx_stat_dot3statsframestoolong_lo;
135 u32 rx_stat_etherstatsfragments_hi;
136 u32 rx_stat_etherstatsfragments_lo;
137 u32 rx_stat_etherstatsjabbers_hi;
138 u32 rx_stat_etherstatsjabbers_lo;
139 u32 rx_stat_maccontrolframesreceived_hi;
140 u32 rx_stat_maccontrolframesreceived_lo;
141 u32 rx_stat_bmac_xpf_hi;
142 u32 rx_stat_bmac_xpf_lo;
143 u32 rx_stat_bmac_xcf_hi;
144 u32 rx_stat_bmac_xcf_lo;
145 u32 rx_stat_xoffstateentered_hi;
146 u32 rx_stat_xoffstateentered_lo;
147 u32 rx_stat_xonpauseframesreceived_hi;
148 u32 rx_stat_xonpauseframesreceived_lo;
149 u32 rx_stat_xoffpauseframesreceived_hi;
150 u32 rx_stat_xoffpauseframesreceived_lo;
151 u32 tx_stat_outxonsent_hi;
152 u32 tx_stat_outxonsent_lo;
153 u32 tx_stat_outxoffsent_hi;
154 u32 tx_stat_outxoffsent_lo;
155 u32 tx_stat_flowcontroldone_hi;
156 u32 tx_stat_flowcontroldone_lo;
157 u32 tx_stat_etherstatscollisions_hi;
158 u32 tx_stat_etherstatscollisions_lo;
159 u32 tx_stat_dot3statssinglecollisionframes_hi;
160 u32 tx_stat_dot3statssinglecollisionframes_lo;
161 u32 tx_stat_dot3statsmultiplecollisionframes_hi;
162 u32 tx_stat_dot3statsmultiplecollisionframes_lo;
163 u32 tx_stat_dot3statsdeferredtransmissions_hi;
164 u32 tx_stat_dot3statsdeferredtransmissions_lo;
165 u32 tx_stat_dot3statsexcessivecollisions_hi;
166 u32 tx_stat_dot3statsexcessivecollisions_lo;
167 u32 tx_stat_dot3statslatecollisions_hi;
168 u32 tx_stat_dot3statslatecollisions_lo;
169 u32 tx_stat_etherstatspkts64octets_hi;
170 u32 tx_stat_etherstatspkts64octets_lo;
171 u32 tx_stat_etherstatspkts65octetsto127octets_hi;
172 u32 tx_stat_etherstatspkts65octetsto127octets_lo;
173 u32 tx_stat_etherstatspkts128octetsto255octets_hi;
174 u32 tx_stat_etherstatspkts128octetsto255octets_lo;
175 u32 tx_stat_etherstatspkts256octetsto511octets_hi;
176 u32 tx_stat_etherstatspkts256octetsto511octets_lo;
177 u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
178 u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
179 u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
180 u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
181 u32 tx_stat_etherstatspktsover1522octets_hi;
182 u32 tx_stat_etherstatspktsover1522octets_lo;
183 u32 tx_stat_bmac_2047_hi;
184 u32 tx_stat_bmac_2047_lo;
185 u32 tx_stat_bmac_4095_hi;
186 u32 tx_stat_bmac_4095_lo;
187 u32 tx_stat_bmac_9216_hi;
188 u32 tx_stat_bmac_9216_lo;
189 u32 tx_stat_bmac_16383_hi;
190 u32 tx_stat_bmac_16383_lo;
191 u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
192 u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
193 u32 tx_stat_bmac_ufl_hi;
194 u32 tx_stat_bmac_ufl_lo;
195
196 u32 pause_frames_received_hi;
197 u32 pause_frames_received_lo;
198 u32 pause_frames_sent_hi;
199 u32 pause_frames_sent_lo;
200
201 u32 etherstatspkts1024octetsto1522octets_hi;
202 u32 etherstatspkts1024octetsto1522octets_lo;
203 u32 etherstatspktsover1522octets_hi;
204 u32 etherstatspktsover1522octets_lo;
205
206 u32 brb_drop_hi;
207 u32 brb_drop_lo;
208 u32 brb_truncate_hi;
209 u32 brb_truncate_lo;
210
211 u32 mac_filter_discard;
212 u32 xxoverflow_discard;
213 u32 brb_truncate_discard;
214 u32 mac_discard;
215
216 u32 driver_xoff;
217 u32 rx_err_discard_pkt;
218 u32 rx_skb_alloc_failed;
219 u32 hw_csum_err;
220
221 u32 nig_timer_max;
222};
223
224#define BNX2X_NUM_STATS 43
225#define STATS_OFFSET32(stat_name) \
226 (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
227
228/* Forward declaration */
229struct bnx2x;
230
231
232void bnx2x_stats_init(struct bnx2x *bp);
233
234extern const u32 dmae_reg_go_c[];
235extern int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
236 u32 data_hi, u32 data_lo, int common);
237
238
239#endif /* BNX2X_STATS_H */
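/* Usage sketch: STATS_OFFSET32()/Q_STATS_OFFSET32() convert a field name into
 * an index into the stats structure viewed as an array of u32s.  The table
 * below is hypothetical and only illustrates the pattern; the real tables
 * live in bnx2x_ethtool.c and differ in layout:
 */
struct bnx2x_stats_entry_example {
	int offset;		/* dword index into struct bnx2x_eth_stats */
	const char *name;	/* name reported by "ethtool -S" */
};

static const struct bnx2x_stats_entry_example example_stats[] = {
	{ STATS_OFFSET32(total_bytes_received_hi), "rx_bytes_hi" },
	{ STATS_OFFSET32(rx_skb_alloc_failed),     "rx_skb_alloc_failed" },
};

/* reading one 32-bit word:
 *	((u32 *)&bp->eth_stats)[example_stats[i].offset]
 */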