author     Dmitry Kravkov <dmitry@broadcom.com>      2010-07-27 08:31:10 -0400
committer  David S. Miller <davem@davemloft.net>     2010-07-27 23:35:39 -0400
commit     5d1e859c5b600c491336f023a2f2105c24597226
tree       9391b004d9237f385c36297214a022cadcb8f84a  /drivers/net/bnx2x
parent     2c6952dfdda2f266f2f501792b8d6413caf25f7a
bnx2x: Create separate folder for bnx2x driver
This commit moves the bnx2x driver files into a newly created folder using
git-mv and fixes the references between the cnic and bnx2x code accordingly.

The files were moved using the following script:

#!/bin/bash
mkdir drivers/net/bnx2x/
list=$(cd drivers/net/ && ls bnx2x*.[ch])
for f in $list; do
        git mv -f drivers/net/$f drivers/net/bnx2x/$f
done

Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
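Note (not part of the patch): git detects renames rather than recording them,
so the pre-move history of each file can still be followed after this change,
for example:

        git log --follow --oneline -- drivers/net/bnx2x/bnx2x_main.c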
Diffstat (limited to 'drivers/net/bnx2x')
-rw-r--r--   drivers/net/bnx2x/Makefile                  7
-rw-r--r--   drivers/net/bnx2x/bnx2x.h                1376
-rw-r--r--   drivers/net/bnx2x/bnx2x_dump.h            534
-rw-r--r--   drivers/net/bnx2x/bnx2x_fw_defs.h         594
-rw-r--r--   drivers/net/bnx2x/bnx2x_fw_file_hdr.h      37
-rw-r--r--   drivers/net/bnx2x/bnx2x_hsi.h            3138
-rw-r--r--   drivers/net/bnx2x/bnx2x_init.h            152
-rw-r--r--   drivers/net/bnx2x/bnx2x_init_ops.h        506
-rw-r--r--   drivers/net/bnx2x/bnx2x_link.c           6735
-rw-r--r--   drivers/net/bnx2x/bnx2x_link.h            206
-rw-r--r--   drivers/net/bnx2x/bnx2x_main.c          13933
-rw-r--r--   drivers/net/bnx2x/bnx2x_reg.h            5364
12 files changed, 32582 insertions, 0 deletions
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
new file mode 100644
index 000000000000..46c853b6cc53
--- /dev/null
+++ b/drivers/net/bnx2x/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for Broadcom 10-Gigabit ethernet driver
3#
4
5obj-$(CONFIG_BNX2X) += bnx2x.o
6
7bnx2x-objs := bnx2x_main.o bnx2x_link.o
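As a usage sketch (not part of this patch, and assuming CONFIG_BNX2X=m in the
kernel configuration), the relocated driver can be rebuilt on its own from the
top of a configured kernel tree with kbuild's per-directory invocation:

        make M=drivers/net/bnx2x modules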
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
new file mode 100644
index 000000000000..3b51c5f0b0a3
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -0,0 +1,1376 @@
1/* bnx2x.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 */
13
14#ifndef BNX2X_H
15#define BNX2X_H
16
17/* compilation time flags */
18
19/* define this to make the driver freeze on error to allow getting debug info
20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */
22
23#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
24#define BCM_VLAN 1
25#endif
26
27#define BNX2X_MULTI_QUEUE
28
29#define BNX2X_NEW_NAPI
30
31
32
33#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
34#define BCM_CNIC 1
35#include "../cnic_if.h"
36#endif
37
38
39#ifdef BCM_CNIC
40#define BNX2X_MIN_MSIX_VEC_CNT 3
41#define BNX2X_MSIX_VEC_FP_START 2
42#else
43#define BNX2X_MIN_MSIX_VEC_CNT 2
44#define BNX2X_MSIX_VEC_FP_START 1
45#endif
46
47#include <linux/mdio.h>
48#include "bnx2x_reg.h"
49#include "bnx2x_fw_defs.h"
50#include "bnx2x_hsi.h"
51#include "bnx2x_link.h"
52
53/* error/debug prints */
54
55#define DRV_MODULE_NAME "bnx2x"
56
57/* for messages that are currently off */
58#define BNX2X_MSG_OFF 0
59#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */
60#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */
61#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */
62#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */
63#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */
64#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */
65
66#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */
67
68/* regular debug print */
69#define DP(__mask, __fmt, __args...) \
70do { \
71 if (bp->msg_enable & (__mask)) \
72 printk(DP_LEVEL "[%s:%d(%s)]" __fmt, \
73 __func__, __LINE__, \
74 bp->dev ? (bp->dev->name) : "?", \
75 ##__args); \
76} while (0)
77
78/* errors debug print */
79#define BNX2X_DBG_ERR(__fmt, __args...) \
80do { \
81 if (netif_msg_probe(bp)) \
82 pr_err("[%s:%d(%s)]" __fmt, \
83 __func__, __LINE__, \
84 bp->dev ? (bp->dev->name) : "?", \
85 ##__args); \
86} while (0)
87
88/* for errors (never masked) */
89#define BNX2X_ERR(__fmt, __args...) \
90do { \
91 pr_err("[%s:%d(%s)]" __fmt, \
92 __func__, __LINE__, \
93 bp->dev ? (bp->dev->name) : "?", \
94 ##__args); \
95 } while (0)
96
97#define BNX2X_ERROR(__fmt, __args...) do { \
98 pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \
99 } while (0)
100
101
102/* before we have a dev->name use dev_info() */
103#define BNX2X_DEV_INFO(__fmt, __args...) \
104do { \
105 if (netif_msg_probe(bp)) \
106 dev_info(&bp->pdev->dev, __fmt, ##__args); \
107} while (0)
108
109
110#ifdef BNX2X_STOP_ON_ERROR
111#define bnx2x_panic() do { \
112 bp->panic = 1; \
113 BNX2X_ERR("driver assert\n"); \
114 bnx2x_int_disable(bp); \
115 bnx2x_panic_dump(bp); \
116 } while (0)
117#else
118#define bnx2x_panic() do { \
119 bp->panic = 1; \
120 BNX2X_ERR("driver assert\n"); \
121 bnx2x_panic_dump(bp); \
122 } while (0)
123#endif
124
125
126#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
127#define U64_HI(x) (u32)(((u64)(x)) >> 32)
128#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
129
130
131#define REG_ADDR(bp, offset) (bp->regview + offset)
132
133#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
134#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
135
136#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
137#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
138#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset))
139
140#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset)
141#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val)
142
143#define REG_RD_DMAE(bp, offset, valp, len32) \
144 do { \
145 bnx2x_read_dmae(bp, offset, len32);\
146 memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
147 } while (0)
148
149#define REG_WR_DMAE(bp, offset, valp, len32) \
150 do { \
151 memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
152 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
153 offset, len32); \
154 } while (0)
155
156#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
157 do { \
158 memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
159 bnx2x_write_big_buf_wb(bp, addr, len32); \
160 } while (0)
161
162#define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \
163 offsetof(struct shmem_region, field))
164#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
165#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
166
167#define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \
168 offsetof(struct shmem2_region, field))
169#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
170#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
171
172#define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field)
173#define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val)
174
175#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
176#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
177
178#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
179 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
180
181
182/* fast path */
183
184struct sw_rx_bd {
185 struct sk_buff *skb;
186 DEFINE_DMA_UNMAP_ADDR(mapping);
187};
188
189struct sw_tx_bd {
190 struct sk_buff *skb;
191 u16 first_bd;
192 u8 flags;
193/* Set on the first BD descriptor when there is a split BD */
194#define BNX2X_TSO_SPLIT_BD (1<<0)
195};
196
197struct sw_rx_page {
198 struct page *page;
199 DEFINE_DMA_UNMAP_ADDR(mapping);
200};
201
202union db_prod {
203 struct doorbell_set_prod data;
204 u32 raw;
205};
206
207
208/* MC hsi */
209#define BCM_PAGE_SHIFT 12
210#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT)
211#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1))
212#define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
213
214#define PAGES_PER_SGE_SHIFT 0
215#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
216#define SGE_PAGE_SIZE PAGE_SIZE
217#define SGE_PAGE_SHIFT PAGE_SHIFT
218#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
219
220/* SGE ring related macros */
221#define NUM_RX_SGE_PAGES 2
222#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
223#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
224/* RX_SGE_CNT is promised to be a power of 2 */
225#define RX_SGE_MASK (RX_SGE_CNT - 1)
226#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
227#define MAX_RX_SGE (NUM_RX_SGE - 1)
228#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
229 (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
230#define RX_SGE(x) ((x) & MAX_RX_SGE)
231
232/* SGE producer mask related macros */
233/* Number of bits in one sge_mask array element */
234#define RX_SGE_MASK_ELEM_SZ 64
235#define RX_SGE_MASK_ELEM_SHIFT 6
236#define RX_SGE_MASK_ELEM_MASK ((u64)RX_SGE_MASK_ELEM_SZ - 1)
237
238/* Creates a bitmask of all ones in less significant bits.
239 idx - index of the most significant bit in the created mask */
240#define RX_SGE_ONES_MASK(idx) \
241 (((u64)0x1 << (((idx) & RX_SGE_MASK_ELEM_MASK) + 1)) - 1)
242#define RX_SGE_MASK_ELEM_ONE_MASK ((u64)(~0))
243
244/* Number of u64 elements in SGE mask array */
245#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
246 RX_SGE_MASK_ELEM_SZ)
247#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
248#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
249
250
251struct bnx2x_eth_q_stats {
252 u32 total_bytes_received_hi;
253 u32 total_bytes_received_lo;
254 u32 total_bytes_transmitted_hi;
255 u32 total_bytes_transmitted_lo;
256 u32 total_unicast_packets_received_hi;
257 u32 total_unicast_packets_received_lo;
258 u32 total_multicast_packets_received_hi;
259 u32 total_multicast_packets_received_lo;
260 u32 total_broadcast_packets_received_hi;
261 u32 total_broadcast_packets_received_lo;
262 u32 total_unicast_packets_transmitted_hi;
263 u32 total_unicast_packets_transmitted_lo;
264 u32 total_multicast_packets_transmitted_hi;
265 u32 total_multicast_packets_transmitted_lo;
266 u32 total_broadcast_packets_transmitted_hi;
267 u32 total_broadcast_packets_transmitted_lo;
268 u32 valid_bytes_received_hi;
269 u32 valid_bytes_received_lo;
270
271 u32 error_bytes_received_hi;
272 u32 error_bytes_received_lo;
273 u32 etherstatsoverrsizepkts_hi;
274 u32 etherstatsoverrsizepkts_lo;
275 u32 no_buff_discard_hi;
276 u32 no_buff_discard_lo;
277
278 u32 driver_xoff;
279 u32 rx_err_discard_pkt;
280 u32 rx_skb_alloc_failed;
281 u32 hw_csum_err;
282};
283
284#define BNX2X_NUM_Q_STATS 13
285#define Q_STATS_OFFSET32(stat_name) \
286 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
287
288struct bnx2x_fastpath {
289
290 struct napi_struct napi;
291 struct host_status_block *status_blk;
292 dma_addr_t status_blk_mapping;
293
294 struct sw_tx_bd *tx_buf_ring;
295
296 union eth_tx_bd_types *tx_desc_ring;
297 dma_addr_t tx_desc_mapping;
298
299 struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
300 struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
301
302 struct eth_rx_bd *rx_desc_ring;
303 dma_addr_t rx_desc_mapping;
304
305 union eth_rx_cqe *rx_comp_ring;
306 dma_addr_t rx_comp_mapping;
307
308 /* SGE ring */
309 struct eth_rx_sge *rx_sge_ring;
310 dma_addr_t rx_sge_mapping;
311
312 u64 sge_mask[RX_SGE_MASK_LEN];
313
314 int state;
315#define BNX2X_FP_STATE_CLOSED 0
316#define BNX2X_FP_STATE_IRQ 0x80000
317#define BNX2X_FP_STATE_OPENING 0x90000
318#define BNX2X_FP_STATE_OPEN 0xa0000
319#define BNX2X_FP_STATE_HALTING 0xb0000
320#define BNX2X_FP_STATE_HALTED 0xc0000
321
322 u8 index; /* number in fp array */
323 u8 cl_id; /* eth client id */
324 u8 sb_id; /* status block number in HW */
325
326 union db_prod tx_db;
327
328 u16 tx_pkt_prod;
329 u16 tx_pkt_cons;
330 u16 tx_bd_prod;
331 u16 tx_bd_cons;
332 __le16 *tx_cons_sb;
333
334 __le16 fp_c_idx;
335 __le16 fp_u_idx;
336
337 u16 rx_bd_prod;
338 u16 rx_bd_cons;
339 u16 rx_comp_prod;
340 u16 rx_comp_cons;
341 u16 rx_sge_prod;
342 /* The last maximal completed SGE */
343 u16 last_max_sge;
344 __le16 *rx_cons_sb;
345 __le16 *rx_bd_cons_sb;
346
347
348 unsigned long tx_pkt,
349 rx_pkt,
350 rx_calls;
351
352 /* TPA related */
353 struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
354 u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
355#define BNX2X_TPA_START 1
356#define BNX2X_TPA_STOP 2
357 u8 disable_tpa;
358#ifdef BNX2X_STOP_ON_ERROR
359 u64 tpa_queue_used;
360#endif
361
362 struct tstorm_per_client_stats old_tclient;
363 struct ustorm_per_client_stats old_uclient;
364 struct xstorm_per_client_stats old_xclient;
365 struct bnx2x_eth_q_stats eth_q_stats;
366
367 /* The size is calculated using the following:
368 sizeof name field from netdev structure +
369 4 ('-Xx-' string) +
370 4 (for the digits and to make it DWORD aligned) */
371#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
372 char name[FP_NAME_SIZE];
373 struct bnx2x *bp; /* parent */
374};
375
376#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
377
378
379/* MC hsi */
380#define MAX_FETCH_BD 13 /* HW max BDs per packet */
381#define RX_COPY_THRESH 92
382
383#define NUM_TX_RINGS 16
384#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
385#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
386#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
387#define MAX_TX_BD (NUM_TX_BD - 1)
388#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
389#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
390 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
391#define TX_BD(x) ((x) & MAX_TX_BD)
392#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
393
394/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
395#define NUM_RX_RINGS 8
396#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
397#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2)
398#define RX_DESC_MASK (RX_DESC_CNT - 1)
399#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
400#define MAX_RX_BD (NUM_RX_BD - 1)
401#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
402#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
403 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
404#define RX_BD(x) ((x) & MAX_RX_BD)
405
406/* As long as CQE is 4 times bigger than BD entry we have to allocate
407 4 times more pages for CQ ring in order to keep it balanced with
408 BD ring */
409#define NUM_RCQ_RINGS (NUM_RX_RINGS * 4)
410#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
411#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)
412#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
413#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
414#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
415#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
416 (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
417#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
418
419
420/* This is needed for determining of last_max */
421#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
422
423#define __SGE_MASK_SET_BIT(el, bit) \
424 do { \
425 el = ((el) | ((u64)0x1 << (bit))); \
426 } while (0)
427
428#define __SGE_MASK_CLEAR_BIT(el, bit) \
429 do { \
430 el = ((el) & (~((u64)0x1 << (bit)))); \
431 } while (0)
432
433#define SGE_MASK_SET_BIT(fp, idx) \
434 __SGE_MASK_SET_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
435 ((idx) & RX_SGE_MASK_ELEM_MASK))
436
437#define SGE_MASK_CLEAR_BIT(fp, idx) \
438 __SGE_MASK_CLEAR_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
439 ((idx) & RX_SGE_MASK_ELEM_MASK))
440
441
442/* used on a CID received from the HW */
443#define SW_CID(x) (le32_to_cpu(x) & \
444 (COMMON_RAMROD_ETH_RX_CQE_CID >> 7))
445#define CQE_CMD(x) (le32_to_cpu(x) >> \
446 COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
447
448#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \
449 le32_to_cpu((bd)->addr_lo))
450#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
451
452
453#define DPM_TRIGER_TYPE 0x40
454#define DOORBELL(bp, cid, val) \
455 do { \
456 writel((u32)(val), bp->doorbells + (BCM_PAGE_SIZE * (cid)) + \
457 DPM_TRIGER_TYPE); \
458 } while (0)
459
460
461/* TX CSUM helpers */
462#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \
463 skb->csum_offset)
464#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
465 skb->csum_offset))
466
467#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
468
469#define XMIT_PLAIN 0
470#define XMIT_CSUM_V4 0x1
471#define XMIT_CSUM_V6 0x2
472#define XMIT_CSUM_TCP 0x4
473#define XMIT_GSO_V4 0x8
474#define XMIT_GSO_V6 0x10
475
476#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6)
477#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6)
478
479
480/* stuff added to make the code fit 80Col */
481
482#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
483
484#define TPA_TYPE_START ETH_FAST_PATH_RX_CQE_START_FLG
485#define TPA_TYPE_END ETH_FAST_PATH_RX_CQE_END_FLG
486#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \
487 (TPA_TYPE_START | TPA_TYPE_END))
488
489#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
490
491#define BNX2X_IP_CSUM_ERR(cqe) \
492 (!((cqe)->fast_path_cqe.status_flags & \
493 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
494 ((cqe)->fast_path_cqe.type_error_flags & \
495 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
496
497#define BNX2X_L4_CSUM_ERR(cqe) \
498 (!((cqe)->fast_path_cqe.status_flags & \
499 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
500 ((cqe)->fast_path_cqe.type_error_flags & \
501 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
502
503#define BNX2X_RX_CSUM_OK(cqe) \
504 (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
505
506#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
507 (((le16_to_cpu(flags) & \
508 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
509 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \
510 == PRS_FLAG_OVERETH_IPV4)
511#define BNX2X_RX_SUM_FIX(cqe) \
512 BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
513
514
515#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES)
516#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES)
517
518#define U_SB_ETH_RX_CQ_INDEX HC_INDEX_U_ETH_RX_CQ_CONS
519#define U_SB_ETH_RX_BD_INDEX HC_INDEX_U_ETH_RX_BD_CONS
520#define C_SB_ETH_TX_CQ_INDEX HC_INDEX_C_ETH_TX_CQ_CONS
521
522#define BNX2X_RX_SB_INDEX \
523 (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX])
524
525#define BNX2X_RX_SB_BD_INDEX \
526 (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_BD_INDEX])
527
528#define BNX2X_RX_SB_INDEX_NUM \
529 (((U_SB_ETH_RX_CQ_INDEX << \
530 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
531 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER) | \
532 ((U_SB_ETH_RX_BD_INDEX << \
533 USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT) & \
534 USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER))
535
536#define BNX2X_TX_SB_INDEX \
537 (&fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX])
538
539
540/* end of fast path */
541
542/* common */
543
544struct bnx2x_common {
545
546 u32 chip_id;
547/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
548#define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0)
549
550#define CHIP_NUM(bp) (bp->common.chip_id >> 16)
551#define CHIP_NUM_57710 0x164e
552#define CHIP_NUM_57711 0x164f
553#define CHIP_NUM_57711E 0x1650
554#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
555#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
556#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
557#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
558 CHIP_IS_57711E(bp))
559#define IS_E1H_OFFSET CHIP_IS_E1H(bp)
560
561#define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000)
562#define CHIP_REV_Ax 0x00000000
563/* assume maximum 5 revisions */
564#define CHIP_REV_IS_SLOW(bp) (CHIP_REV(bp) > 0x00005000)
565/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
566#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \
567 !(CHIP_REV(bp) & 0x00001000))
568/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
569#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \
570 (CHIP_REV(bp) & 0x00001000))
571
572#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
573 ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
574
575#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
576#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
577
578 int flash_size;
579#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
580#define NVRAM_TIMEOUT_COUNT 30000
581#define NVRAM_PAGE_SIZE 256
582
583 u32 shmem_base;
584 u32 shmem2_base;
585
586 u32 hw_config;
587
588 u32 bc_ver;
589};
590
591
592/* end of common */
593
594/* port */
595
596struct nig_stats {
597 u32 brb_discard;
598 u32 brb_packet;
599 u32 brb_truncate;
600 u32 flow_ctrl_discard;
601 u32 flow_ctrl_octets;
602 u32 flow_ctrl_packet;
603 u32 mng_discard;
604 u32 mng_octet_inp;
605 u32 mng_octet_out;
606 u32 mng_packet_inp;
607 u32 mng_packet_out;
608 u32 pbf_octets;
609 u32 pbf_packet;
610 u32 safc_inp;
611 u32 egress_mac_pkt0_lo;
612 u32 egress_mac_pkt0_hi;
613 u32 egress_mac_pkt1_lo;
614 u32 egress_mac_pkt1_hi;
615};
616
617struct bnx2x_port {
618 u32 pmf;
619
620 u32 link_config;
621
622 u32 supported;
623/* link settings - missing defines */
624#define SUPPORTED_2500baseX_Full (1 << 15)
625
626 u32 advertising;
627/* link settings - missing defines */
628#define ADVERTISED_2500baseX_Full (1 << 15)
629
630 u32 phy_addr;
631
632 /* used to synchronize phy accesses */
633 struct mutex phy_mutex;
634 int need_hw_lock;
635
636 u32 port_stx;
637
638 struct nig_stats old_nig_stats;
639};
640
641/* end of port */
642
643
644enum bnx2x_stats_event {
645 STATS_EVENT_PMF = 0,
646 STATS_EVENT_LINK_UP,
647 STATS_EVENT_UPDATE,
648 STATS_EVENT_STOP,
649 STATS_EVENT_MAX
650};
651
652enum bnx2x_stats_state {
653 STATS_STATE_DISABLED = 0,
654 STATS_STATE_ENABLED,
655 STATS_STATE_MAX
656};
657
658struct bnx2x_eth_stats {
659 u32 total_bytes_received_hi;
660 u32 total_bytes_received_lo;
661 u32 total_bytes_transmitted_hi;
662 u32 total_bytes_transmitted_lo;
663 u32 total_unicast_packets_received_hi;
664 u32 total_unicast_packets_received_lo;
665 u32 total_multicast_packets_received_hi;
666 u32 total_multicast_packets_received_lo;
667 u32 total_broadcast_packets_received_hi;
668 u32 total_broadcast_packets_received_lo;
669 u32 total_unicast_packets_transmitted_hi;
670 u32 total_unicast_packets_transmitted_lo;
671 u32 total_multicast_packets_transmitted_hi;
672 u32 total_multicast_packets_transmitted_lo;
673 u32 total_broadcast_packets_transmitted_hi;
674 u32 total_broadcast_packets_transmitted_lo;
675 u32 valid_bytes_received_hi;
676 u32 valid_bytes_received_lo;
677
678 u32 error_bytes_received_hi;
679 u32 error_bytes_received_lo;
680 u32 etherstatsoverrsizepkts_hi;
681 u32 etherstatsoverrsizepkts_lo;
682 u32 no_buff_discard_hi;
683 u32 no_buff_discard_lo;
684
685 u32 rx_stat_ifhcinbadoctets_hi;
686 u32 rx_stat_ifhcinbadoctets_lo;
687 u32 tx_stat_ifhcoutbadoctets_hi;
688 u32 tx_stat_ifhcoutbadoctets_lo;
689 u32 rx_stat_dot3statsfcserrors_hi;
690 u32 rx_stat_dot3statsfcserrors_lo;
691 u32 rx_stat_dot3statsalignmenterrors_hi;
692 u32 rx_stat_dot3statsalignmenterrors_lo;
693 u32 rx_stat_dot3statscarriersenseerrors_hi;
694 u32 rx_stat_dot3statscarriersenseerrors_lo;
695 u32 rx_stat_falsecarriererrors_hi;
696 u32 rx_stat_falsecarriererrors_lo;
697 u32 rx_stat_etherstatsundersizepkts_hi;
698 u32 rx_stat_etherstatsundersizepkts_lo;
699 u32 rx_stat_dot3statsframestoolong_hi;
700 u32 rx_stat_dot3statsframestoolong_lo;
701 u32 rx_stat_etherstatsfragments_hi;
702 u32 rx_stat_etherstatsfragments_lo;
703 u32 rx_stat_etherstatsjabbers_hi;
704 u32 rx_stat_etherstatsjabbers_lo;
705 u32 rx_stat_maccontrolframesreceived_hi;
706 u32 rx_stat_maccontrolframesreceived_lo;
707 u32 rx_stat_bmac_xpf_hi;
708 u32 rx_stat_bmac_xpf_lo;
709 u32 rx_stat_bmac_xcf_hi;
710 u32 rx_stat_bmac_xcf_lo;
711 u32 rx_stat_xoffstateentered_hi;
712 u32 rx_stat_xoffstateentered_lo;
713 u32 rx_stat_xonpauseframesreceived_hi;
714 u32 rx_stat_xonpauseframesreceived_lo;
715 u32 rx_stat_xoffpauseframesreceived_hi;
716 u32 rx_stat_xoffpauseframesreceived_lo;
717 u32 tx_stat_outxonsent_hi;
718 u32 tx_stat_outxonsent_lo;
719 u32 tx_stat_outxoffsent_hi;
720 u32 tx_stat_outxoffsent_lo;
721 u32 tx_stat_flowcontroldone_hi;
722 u32 tx_stat_flowcontroldone_lo;
723 u32 tx_stat_etherstatscollisions_hi;
724 u32 tx_stat_etherstatscollisions_lo;
725 u32 tx_stat_dot3statssinglecollisionframes_hi;
726 u32 tx_stat_dot3statssinglecollisionframes_lo;
727 u32 tx_stat_dot3statsmultiplecollisionframes_hi;
728 u32 tx_stat_dot3statsmultiplecollisionframes_lo;
729 u32 tx_stat_dot3statsdeferredtransmissions_hi;
730 u32 tx_stat_dot3statsdeferredtransmissions_lo;
731 u32 tx_stat_dot3statsexcessivecollisions_hi;
732 u32 tx_stat_dot3statsexcessivecollisions_lo;
733 u32 tx_stat_dot3statslatecollisions_hi;
734 u32 tx_stat_dot3statslatecollisions_lo;
735 u32 tx_stat_etherstatspkts64octets_hi;
736 u32 tx_stat_etherstatspkts64octets_lo;
737 u32 tx_stat_etherstatspkts65octetsto127octets_hi;
738 u32 tx_stat_etherstatspkts65octetsto127octets_lo;
739 u32 tx_stat_etherstatspkts128octetsto255octets_hi;
740 u32 tx_stat_etherstatspkts128octetsto255octets_lo;
741 u32 tx_stat_etherstatspkts256octetsto511octets_hi;
742 u32 tx_stat_etherstatspkts256octetsto511octets_lo;
743 u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
744 u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
745 u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
746 u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
747 u32 tx_stat_etherstatspktsover1522octets_hi;
748 u32 tx_stat_etherstatspktsover1522octets_lo;
749 u32 tx_stat_bmac_2047_hi;
750 u32 tx_stat_bmac_2047_lo;
751 u32 tx_stat_bmac_4095_hi;
752 u32 tx_stat_bmac_4095_lo;
753 u32 tx_stat_bmac_9216_hi;
754 u32 tx_stat_bmac_9216_lo;
755 u32 tx_stat_bmac_16383_hi;
756 u32 tx_stat_bmac_16383_lo;
757 u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
758 u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
759 u32 tx_stat_bmac_ufl_hi;
760 u32 tx_stat_bmac_ufl_lo;
761
762 u32 pause_frames_received_hi;
763 u32 pause_frames_received_lo;
764 u32 pause_frames_sent_hi;
765 u32 pause_frames_sent_lo;
766
767 u32 etherstatspkts1024octetsto1522octets_hi;
768 u32 etherstatspkts1024octetsto1522octets_lo;
769 u32 etherstatspktsover1522octets_hi;
770 u32 etherstatspktsover1522octets_lo;
771
772 u32 brb_drop_hi;
773 u32 brb_drop_lo;
774 u32 brb_truncate_hi;
775 u32 brb_truncate_lo;
776
777 u32 mac_filter_discard;
778 u32 xxoverflow_discard;
779 u32 brb_truncate_discard;
780 u32 mac_discard;
781
782 u32 driver_xoff;
783 u32 rx_err_discard_pkt;
784 u32 rx_skb_alloc_failed;
785 u32 hw_csum_err;
786
787 u32 nig_timer_max;
788};
789
790#define BNX2X_NUM_STATS 43
791#define STATS_OFFSET32(stat_name) \
792 (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
793
794
795#ifdef BCM_CNIC
796#define MAX_CONTEXT 15
797#else
798#define MAX_CONTEXT 16
799#endif
800
801union cdu_context {
802 struct eth_context eth;
803 char pad[1024];
804};
805
806#define MAX_DMAE_C 8
807
808/* DMA memory not used in fastpath */
809struct bnx2x_slowpath {
810 union cdu_context context[MAX_CONTEXT];
811 struct eth_stats_query fw_stats;
812 struct mac_configuration_cmd mac_config;
813 struct mac_configuration_cmd mcast_config;
814
815 /* used by dmae command executer */
816 struct dmae_command dmae[MAX_DMAE_C];
817
818 u32 stats_comp;
819 union mac_stats mac_stats;
820 struct nig_stats nig_stats;
821 struct host_port_stats port_stats;
822 struct host_func_stats func_stats;
823 struct host_func_stats func_stats_base;
824
825 u32 wb_comp;
826 u32 wb_data[4];
827};
828
829#define bnx2x_sp(bp, var) (&bp->slowpath->var)
830#define bnx2x_sp_mapping(bp, var) \
831 (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
832
833
834/* attn group wiring */
835#define MAX_DYNAMIC_ATTN_GRPS 8
836
837struct attn_route {
838 u32 sig[4];
839};
840
841typedef enum {
842 BNX2X_RECOVERY_DONE,
843 BNX2X_RECOVERY_INIT,
844 BNX2X_RECOVERY_WAIT,
845} bnx2x_recovery_state_t;
846
847struct bnx2x {
848 /* Fields used in the tx and intr/napi performance paths
849 * are grouped together in the beginning of the structure
850 */
851 struct bnx2x_fastpath fp[MAX_CONTEXT];
852 void __iomem *regview;
853 void __iomem *doorbells;
854#ifdef BCM_CNIC
855#define BNX2X_DB_SIZE (18*BCM_PAGE_SIZE)
856#else
857#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE)
858#endif
859
860 struct net_device *dev;
861 struct pci_dev *pdev;
862
863 atomic_t intr_sem;
864
865 bnx2x_recovery_state_t recovery_state;
866 int is_leader;
867#ifdef BCM_CNIC
868 struct msix_entry msix_table[MAX_CONTEXT+2];
869#else
870 struct msix_entry msix_table[MAX_CONTEXT+1];
871#endif
872#define INT_MODE_INTx 1
873#define INT_MODE_MSI 2
874
875 int tx_ring_size;
876
877#ifdef BCM_VLAN
878 struct vlan_group *vlgrp;
879#endif
880
881 u32 rx_csum;
882 u32 rx_buf_size;
883#define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */
884#define ETH_MIN_PACKET_SIZE 60
885#define ETH_MAX_PACKET_SIZE 1500
886#define ETH_MAX_JUMBO_PACKET_SIZE 9600
887
888 /* Max supported alignment is 256 (8 shift) */
889#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
890 L1_CACHE_SHIFT : 8)
891#define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT)
892
893 struct host_def_status_block *def_status_blk;
894#define DEF_SB_ID 16
895 __le16 def_c_idx;
896 __le16 def_u_idx;
897 __le16 def_x_idx;
898 __le16 def_t_idx;
899 __le16 def_att_idx;
900 u32 attn_state;
901 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
902
903 /* slow path ring */
904 struct eth_spe *spq;
905 dma_addr_t spq_mapping;
906 u16 spq_prod_idx;
907 struct eth_spe *spq_prod_bd;
908 struct eth_spe *spq_last_bd;
909 __le16 *dsb_sp_prod;
910 u16 spq_left; /* serialize spq */
911 /* used to synchronize spq accesses */
912 spinlock_t spq_lock;
913
914 /* Flags for marking that there is a STAT_QUERY or
915 SET_MAC ramrod pending */
916 int stats_pending;
917 int set_mac_pending;
918
919 /* End of fields used in the performance code paths */
920
921 int panic;
922 int msg_enable;
923
924 u32 flags;
925#define PCIX_FLAG 1
926#define PCI_32BIT_FLAG 2
927#define ONE_PORT_FLAG 4
928#define NO_WOL_FLAG 8
929#define USING_DAC_FLAG 0x10
930#define USING_MSIX_FLAG 0x20
931#define USING_MSI_FLAG 0x40
932#define TPA_ENABLE_FLAG 0x80
933#define NO_MCP_FLAG 0x100
934#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
935#define HW_VLAN_TX_FLAG 0x400
936#define HW_VLAN_RX_FLAG 0x800
937#define MF_FUNC_DIS 0x1000
938
939 int func;
940#define BP_PORT(bp) (bp->func % PORT_MAX)
941#define BP_FUNC(bp) (bp->func)
942#define BP_E1HVN(bp) (bp->func >> 1)
943#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
944
945#ifdef BCM_CNIC
946#define BCM_CNIC_CID_START 16
947#define BCM_ISCSI_ETH_CL_ID 17
948#endif
949
950 int pm_cap;
951 int pcie_cap;
952 int mrrs;
953
954 struct delayed_work sp_task;
955 struct delayed_work reset_task;
956 struct timer_list timer;
957 int current_interval;
958
959 u16 fw_seq;
960 u16 fw_drv_pulse_wr_seq;
961 u32 func_stx;
962
963 struct link_params link_params;
964 struct link_vars link_vars;
965 struct mdio_if_info mdio;
966
967 struct bnx2x_common common;
968 struct bnx2x_port port;
969
970 struct cmng_struct_per_port cmng;
971 u32 vn_weight_sum;
972
973 u32 mf_config;
974 u16 e1hov;
975 u8 e1hmf;
976#define IS_E1HMF(bp) (bp->e1hmf != 0)
977
978 u8 wol;
979
980 int rx_ring_size;
981
982 u16 tx_quick_cons_trip_int;
983 u16 tx_quick_cons_trip;
984 u16 tx_ticks_int;
985 u16 tx_ticks;
986
987 u16 rx_quick_cons_trip_int;
988 u16 rx_quick_cons_trip;
989 u16 rx_ticks_int;
990 u16 rx_ticks;
991/* Maximal coalescing timeout in us */
992#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
993
994 u32 lin_cnt;
995
996 int state;
997#define BNX2X_STATE_CLOSED 0
998#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000
999#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000
1000#define BNX2X_STATE_OPEN 0x3000
1001#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
1002#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
1003#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
1004#define BNX2X_STATE_DIAG 0xe000
1005#define BNX2X_STATE_ERROR 0xf000
1006
1007 int multi_mode;
1008 int num_queues;
1009
1010 u32 rx_mode;
1011#define BNX2X_RX_MODE_NONE 0
1012#define BNX2X_RX_MODE_NORMAL 1
1013#define BNX2X_RX_MODE_ALLMULTI 2
1014#define BNX2X_RX_MODE_PROMISC 3
1015#define BNX2X_MAX_MULTICAST 64
1016#define BNX2X_MAX_EMUL_MULTI 16
1017
1018 u32 rx_mode_cl_mask;
1019
1020 dma_addr_t def_status_blk_mapping;
1021
1022 struct bnx2x_slowpath *slowpath;
1023 dma_addr_t slowpath_mapping;
1024
1025 int dropless_fc;
1026
1027#ifdef BCM_CNIC
1028 u32 cnic_flags;
1029#define BNX2X_CNIC_FLAG_MAC_SET 1
1030
1031 void *t1;
1032 dma_addr_t t1_mapping;
1033 void *t2;
1034 dma_addr_t t2_mapping;
1035 void *timers;
1036 dma_addr_t timers_mapping;
1037 void *qm;
1038 dma_addr_t qm_mapping;
1039 struct cnic_ops *cnic_ops;
1040 void *cnic_data;
1041 u32 cnic_tag;
1042 struct cnic_eth_dev cnic_eth_dev;
1043 struct host_status_block *cnic_sb;
1044 dma_addr_t cnic_sb_mapping;
1045#define CNIC_SB_ID(bp) BP_L_ID(bp)
1046 struct eth_spe *cnic_kwq;
1047 struct eth_spe *cnic_kwq_prod;
1048 struct eth_spe *cnic_kwq_cons;
1049 struct eth_spe *cnic_kwq_last;
1050 u16 cnic_kwq_pending;
1051 u16 cnic_spq_pending;
1052 struct mutex cnic_mutex;
1053 u8 iscsi_mac[6];
1054#endif
1055
1056 int dmae_ready;
1057 /* used to synchronize dmae accesses */
1058 struct mutex dmae_mutex;
1059
1060 /* used to protect the FW mail box */
1061 struct mutex fw_mb_mutex;
1062
1063 /* used to synchronize stats collecting */
1064 int stats_state;
1065 /* used by dmae command loader */
1066 struct dmae_command stats_dmae;
1067 int executer_idx;
1068
1069 u16 stats_counter;
1070 struct bnx2x_eth_stats eth_stats;
1071
1072 struct z_stream_s *strm;
1073 void *gunzip_buf;
1074 dma_addr_t gunzip_mapping;
1075 int gunzip_outlen;
1076#define FW_BUF_SIZE 0x8000
1077#define GUNZIP_BUF(bp) (bp->gunzip_buf)
1078#define GUNZIP_PHYS(bp) (bp->gunzip_mapping)
1079#define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen)
1080
1081 struct raw_op *init_ops;
1082 /* Init blocks offsets inside init_ops */
1083 u16 *init_ops_offsets;
1084 /* Data blob - has 32 bit granularity */
1085 u32 *init_data;
1086 /* Zipped PRAM blobs - raw data */
1087 const u8 *tsem_int_table_data;
1088 const u8 *tsem_pram_data;
1089 const u8 *usem_int_table_data;
1090 const u8 *usem_pram_data;
1091 const u8 *xsem_int_table_data;
1092 const u8 *xsem_pram_data;
1093 const u8 *csem_int_table_data;
1094 const u8 *csem_pram_data;
1095#define INIT_OPS(bp) (bp->init_ops)
1096#define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets)
1097#define INIT_DATA(bp) (bp->init_data)
1098#define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data)
1099#define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data)
1100#define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data)
1101#define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data)
1102#define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data)
1103#define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data)
1104#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
1105#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
1106
1107 char fw_ver[32];
1108 const struct firmware *firmware;
1109};
1110
1111
1112#define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
1113 : MAX_CONTEXT)
1114#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1115#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
1116
1117#define for_each_queue(bp, var) \
1118 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
1119#define for_each_nondefault_queue(bp, var) \
1120 for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
1121
1122
1123void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
1124void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
1125 u32 len32);
1126int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
1127int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
1128int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
1129u32 bnx2x_fw_command(struct bnx2x *bp, u32 command);
1130void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
1131void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
1132 u32 addr, u32 len);
1133
1134static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1135 int wait)
1136{
1137 u32 val;
1138
1139 do {
1140 val = REG_RD(bp, reg);
1141 if (val == expected)
1142 break;
1143 ms -= wait;
1144 msleep(wait);
1145
1146 } while (ms > 0);
1147
1148 return val;
1149}
1150
1151
1152/* load/unload mode */
1153#define LOAD_NORMAL 0
1154#define LOAD_OPEN 1
1155#define LOAD_DIAG 2
1156#define UNLOAD_NORMAL 0
1157#define UNLOAD_CLOSE 1
1158#define UNLOAD_RECOVERY 2
1159
1160
1161/* DMAE command defines */
1162#define DMAE_CMD_SRC_PCI 0
1163#define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC
1164
1165#define DMAE_CMD_DST_PCI (1 << DMAE_COMMAND_DST_SHIFT)
1166#define DMAE_CMD_DST_GRC (2 << DMAE_COMMAND_DST_SHIFT)
1167
1168#define DMAE_CMD_C_DST_PCI 0
1169#define DMAE_CMD_C_DST_GRC (1 << DMAE_COMMAND_C_DST_SHIFT)
1170
1171#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
1172
1173#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
1174#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
1175#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
1176#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
1177
1178#define DMAE_CMD_PORT_0 0
1179#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT
1180
1181#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET
1182#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
1183#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
1184
1185#define DMAE_LEN32_RD_MAX 0x80
1186#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
1187
1188#define DMAE_COMP_VAL 0xe0d0d0ae
1189
1190#define MAX_DMAE_C_PER_PORT 8
1191#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1192 BP_E1HVN(bp))
1193#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1194 E1HVN_MAX)
1195
1196
1197/* PCIE link and speed */
1198#define PCICFG_LINK_WIDTH 0x1f00000
1199#define PCICFG_LINK_WIDTH_SHIFT 20
1200#define PCICFG_LINK_SPEED 0xf0000
1201#define PCICFG_LINK_SPEED_SHIFT 16
1202
1203
1204#define BNX2X_NUM_TESTS 7
1205
1206#define BNX2X_PHY_LOOPBACK 0
1207#define BNX2X_MAC_LOOPBACK 1
1208#define BNX2X_PHY_LOOPBACK_FAILED 1
1209#define BNX2X_MAC_LOOPBACK_FAILED 2
1210#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
1211 BNX2X_PHY_LOOPBACK_FAILED)
1212
1213
1214#define STROM_ASSERT_ARRAY_SIZE 50
1215
1216
1217/* must be used on a CID before placing it on a HW ring */
1218#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
1219 (BP_E1HVN(bp) << 17) | (x))
1220
1221#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
1222#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
1223
1224
1225#define BNX2X_BTR 1
1226#define MAX_SPQ_PENDING 8
1227
1228
1229/* CMNG constants
1230 derived from lab experiments, and not from system spec calculations !!! */
1231#define DEF_MIN_RATE 100
1232/* resolution of the rate shaping timer - 100 usec */
1233#define RS_PERIODIC_TIMEOUT_USEC 100
1234/* resolution of fairness algorithm in usecs -
1235 coefficient for calculating the actual t fair */
1236#define T_FAIR_COEF 10000000
1237/* number of bytes in single QM arbitration cycle -
1238 coefficient for calculating the fairness timer */
1239#define QM_ARB_BYTES 40000
1240#define FAIR_MEM 2
1241
1242
1243#define ATTN_NIG_FOR_FUNC (1L << 8)
1244#define ATTN_SW_TIMER_4_FUNC (1L << 9)
1245#define GPIO_2_FUNC (1L << 10)
1246#define GPIO_3_FUNC (1L << 11)
1247#define GPIO_4_FUNC (1L << 12)
1248#define ATTN_GENERAL_ATTN_1 (1L << 13)
1249#define ATTN_GENERAL_ATTN_2 (1L << 14)
1250#define ATTN_GENERAL_ATTN_3 (1L << 15)
1251#define ATTN_GENERAL_ATTN_4 (1L << 13)
1252#define ATTN_GENERAL_ATTN_5 (1L << 14)
1253#define ATTN_GENERAL_ATTN_6 (1L << 15)
1254
1255#define ATTN_HARD_WIRED_MASK 0xff00
1256#define ATTENTION_ID 4
1257
1258
1259/* stuff added to make the code fit 80Col */
1260
1261#define BNX2X_PMF_LINK_ASSERT \
1262 GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))
1263
1264#define BNX2X_MC_ASSERT_BITS \
1265 (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
1266 GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
1267 GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
1268 GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
1269
1270#define BNX2X_MCP_ASSERT \
1271 GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
1272
1273#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
1274#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
1275 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
1276 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
1277 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
1278 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
1279 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
1280
1281#define HW_INTERRUT_ASSERT_SET_0 \
1282 (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
1283 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
1284 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
1285 AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT)
1286#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
1287 AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
1288 AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
1289 AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
1290 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR)
1291#define HW_INTERRUT_ASSERT_SET_1 \
1292 (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
1293 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
1294 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
1295 AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \
1296 AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \
1297 AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \
1298 AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \
1299 AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \
1300 AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
1301 AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
1302 AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
1303#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR |\
1304 AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
1305 AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
1306 AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
1307 AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
1308 AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
1309 AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
1310 AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
1311 AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
1312 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
1313 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR)
1314#define HW_INTERRUT_ASSERT_SET_2 \
1315 (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
1316 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
1317 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
1318 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
1319 AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
1320#define HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
1321 AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
1322 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
1323 AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
1324 AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
1325 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
1326 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
1327
1328#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
1329 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
1330 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
1331 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
1332
1333#define RSS_FLAGS(bp) \
1334 (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
1335 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
1336 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
1337 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
1338 (bp->multi_mode << \
1339 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
1340#define MULTI_MASK 0x7f
1341
1342
1343#define DEF_USB_FUNC_OFF (2 + 2*HC_USTORM_DEF_SB_NUM_INDICES)
1344#define DEF_CSB_FUNC_OFF (2 + 2*HC_CSTORM_DEF_SB_NUM_INDICES)
1345#define DEF_XSB_FUNC_OFF (2 + 2*HC_XSTORM_DEF_SB_NUM_INDICES)
1346#define DEF_TSB_FUNC_OFF (2 + 2*HC_TSTORM_DEF_SB_NUM_INDICES)
1347
1348#define C_DEF_SB_SP_INDEX HC_INDEX_DEF_C_ETH_SLOW_PATH
1349
1350#define BNX2X_SP_DSB_INDEX \
1351(&bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX])
1352
1353
1354#define CAM_IS_INVALID(x) \
1355(x.target_table_entry.flags == TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
1356
1357#define CAM_INVALIDATE(x) \
1358 (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
1359
1360
1361/* Number of u32 elements in MC hash array */
1362#define MC_HASH_SIZE 8
1363#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
1364 TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
1365
1366
1367#ifndef PXP2_REG_PXP2_INT_STS
1368#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
1369#endif
1370
1371#define BNX2X_VPD_LEN 128
1372#define VENDOR_ID_LEN 4
1373
1374/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
1375
1376#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
new file mode 100644
index 000000000000..3bb9a91bb3f7
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -0,0 +1,534 @@
1/* bnx2x_dump.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9
10
11/* This struct holds a signature to ensure the dump returned from the driver
12 * match the meta data file inserted to grc_dump.tcl
13 * The signature is time stamp, diag version and grc_dump version
14 */
15
16#ifndef BNX2X_DUMP_H
17#define BNX2X_DUMP_H
18
19
20struct dump_sign {
21 u32 time_stamp;
22 u32 diag_ver;
23 u32 grc_dump_ver;
24};
25
26#define TSTORM_WAITP_ADDR 0x1b8a80
27#define CSTORM_WAITP_ADDR 0x238a80
28#define XSTORM_WAITP_ADDR 0x2b8a80
29#define USTORM_WAITP_ADDR 0x338a80
30#define TSTORM_CAM_MODE 0x1b1440
31
32#define RI_E1 0x1
33#define RI_E1H 0x2
34#define RI_ONLINE 0x100
35
36#define RI_E1_OFFLINE (RI_E1)
37#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
38#define RI_E1H_OFFLINE (RI_E1H)
39#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
40#define RI_ALL_OFFLINE (RI_E1 | RI_E1H)
41#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
42
43#define MAX_TIMER_PENDING 200
44#define TIMER_SCAN_DONT_CARE 0xFF
45
46
47struct dump_hdr {
48 u32 hdr_size; /* in dwords, excluding this field */
49 struct dump_sign dump_sign;
50 u32 xstorm_waitp;
51 u32 tstorm_waitp;
52 u32 ustorm_waitp;
53 u32 cstorm_waitp;
54 u16 info;
55 u8 idle_chk;
56 u8 reserved;
57};
58
59struct reg_addr {
60 u32 addr;
61 u32 size;
62 u16 info;
63};
64
65struct wreg_addr {
66 u32 addr;
67 u32 size;
68 u32 read_regs_count;
69 const u32 *read_regs;
70 u16 info;
71};
72
73
74#define REGS_COUNT 558
75static const struct reg_addr reg_addrs[REGS_COUNT] = {
76 { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
77 { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
78 { 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE },
79 { 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE },
80 { 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE },
81 { 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE },
82 { 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE },
83 { 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE },
84 { 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE },
85 { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE },
86 { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE },
87 { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE },
88 { 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE },
89 { 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE },
90 { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE },
91 { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE },
92 { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE },
93 { 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE },
94 { 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
95 { 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE },
96 { 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE },
97 { 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE },
98 { 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE },
99 { 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE },
100 { 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE },
101 { 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE },
102 { 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE },
103 { 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE },
104 { 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE },
105 { 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE },
106 { 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE },
107 { 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE },
108 { 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE },
109 { 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE },
110 { 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE },
111 { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE },
112 { 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE },
113 { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE },
114 { 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE },
115 { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE },
116 { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE },
117 { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE },
118 { 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE },
119 { 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE },
120 { 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE },
121 { 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE },
122 { 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
123 { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
124 { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
125 { 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE },
126 { 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE },
127 { 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE },
128 { 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE },
129 { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE },
130 { 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE },
131 { 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE },
132 { 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE },
133 { 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE },
134 { 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE },
135 { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
136 { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
137 { 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE },
138 { 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE },
139 { 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
140 { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
141 { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE },
142 { 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE },
143 { 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE },
144 { 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE },
145 { 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE },
146 { 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE },
147 { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE },
148 { 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE },
149 { 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE },
150 { 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE },
151 { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
152 { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
153 { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE },
154 { 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE },
155 { 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE },
156 { 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE },
157 { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
158 { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
159 { 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE },
160 { 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE },
161 { 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE },
162 { 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE },
163 { 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE },
164 { 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE },
165 { 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE },
166 { 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE },
167 { 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
168 { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE },
169 { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
170 { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
171 { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
172 { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
173 { 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE },
174 { 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE },
175 { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE },
176 { 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE },
177 { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
178 { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE },
179 { 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE },
180 { 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE },
181 { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
182 { 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE },
183 { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE },
184 { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE },
185 { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE },
186 { 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE },
187 { 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE },
188 { 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE },
189 { 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE },
190 { 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE },
191 { 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE },
192 { 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE },
193 { 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE },
194 { 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE },
195 { 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
196 { 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE },
197 { 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
198 { 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE },
199 { 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
200 { 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE },
201 { 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE },
202 { 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE },
203 { 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE },
204 { 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE },
205 { 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE },
206 { 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE },
207 { 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE },
208 { 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE },
209 { 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE },
210 { 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE },
211 { 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
212 { 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE },
213 { 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
214 { 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE },
215 { 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
216 { 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE },
217 { 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE },
218 { 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE },
219 { 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE },
220 { 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE },
221 { 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE },
222 { 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE },
223 { 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE },
224 { 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE },
225 { 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE },
226 { 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE },
227 { 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
228 { 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE },
229 { 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
230 { 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE },
231 { 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
232 { 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE },
233 { 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE },
234 { 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE },
235 { 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE },
236 { 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE },
237 { 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE },
238 { 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE },
239 { 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE },
240 { 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE },
241 { 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE },
242 { 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE },
243 { 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
244 { 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE },
245 { 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
246 { 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE },
247 { 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
248 { 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE },
249 { 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE },
250 { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE },
251 { 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE },
252 { 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE },
253 { 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE },
254 { 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE },
255 { 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE },
256 { 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE },
257 { 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
258 { 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE },
259 { 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE },
260 { 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
261 { 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE },
262 { 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE },
263 { 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE },
264 { 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
265 { 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE },
266 { 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE },
267 { 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE },
268 { 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
269 { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
270 { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
271 { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
272 { 0x164238, 1, RI_ALL_ONLINE }, { 0x164240, 1, RI_ALL_ONLINE },
273 { 0x164248, 1, RI_ALL_ONLINE }, { 0x164250, 1, RI_ALL_ONLINE },
274 { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
275 { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
276 { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
277 { 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE },
278 { 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE },
279 { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
280 { 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE },
281 { 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE },
282 { 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE },
283 { 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE },
284 { 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE },
285 { 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE },
286 { 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
287 { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE },
288 { 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE },
289 { 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE },
290 { 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE },
291 { 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE },
292 { 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE },
293 { 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE },
294 { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE },
295 { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE },
296 { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
297 { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE },
298 { 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE },
299 { 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE },
300 { 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE },
301 { 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE },
302 { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE },
303 { 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
304 { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
305 { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
306 { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE },
307 { 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE },
308 { 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE },
309 { 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE },
310 { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE },
311 { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE },
312 { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE },
313 { 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE },
314 { 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE },
315 { 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE },
316 { 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE },
317 { 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
318 { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE },
319 { 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE },
320 { 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE },
321 { 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE },
322 { 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE },
323 { 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE },
324 { 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE },
325 { 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE },
326 { 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE },
327 { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE },
328 { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE },
329 { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE },
330 { 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE },
331 { 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE },
332 { 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE },
333 { 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
334 { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE },
335 { 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE },
336 { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE },
337 { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE },
338 { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
339 { 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE },
340 { 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE },
341 { 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE },
342 { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
343 { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE },
344 { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
345 { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE },
346 { 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE },
347 { 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE },
348 { 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE },
349 { 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE },
350 { 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE },
351 { 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE },
352 { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE },
353 { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
354 { 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }
355};
356
357
358#define IDLE_REGS_COUNT 277
359static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
360 { 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE },
361 { 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
362 { 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE },
363 { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
364 { 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE },
365 { 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE },
366 { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE },
367 { 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE },
368 { 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE },
369 { 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE },
370 { 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE },
371 { 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE },
372 { 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE },
373 { 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE },
374 { 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE },
375 { 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE },
376 { 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE },
377 { 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE },
378 { 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE },
379 { 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE },
380 { 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE },
381 { 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE },
382 { 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE },
383 { 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE },
384 { 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE },
385 { 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE },
386 { 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE },
387 { 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE },
388 { 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE },
389 { 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE },
390 { 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE },
391 { 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE },
392 { 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE },
393 { 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE },
394 { 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE },
395 { 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE },
396 { 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE },
397 { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
398 { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
399 { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
400 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE },
401 { 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE },
402 { 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE },
403 { 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE },
404 { 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE },
405 { 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE },
406 { 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
407 { 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
408 { 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
409 { 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE },
410 { 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE },
411 { 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE },
412 { 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE },
413 { 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE },
414 { 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
415 { 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
416 { 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
417 { 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE },
418 { 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE },
419 { 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE },
420 { 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE },
421 { 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE },
422 { 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
423 { 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
424 { 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
425 { 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE },
426 { 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE },
427 { 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE },
428 { 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE },
429 { 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE },
430 { 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
431 { 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
432 { 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
433 { 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE },
434 { 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE },
435 { 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE },
436 { 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE },
437 { 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE },
438 { 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE },
439 { 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE },
440 { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
441 { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
442 { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
443 { 0x120848, 1, RI_ALL_ONLINE }, { 0x120850, 1, RI_ALL_ONLINE },
444 { 0x120858, 1, RI_ALL_ONLINE }, { 0x120860, 1, RI_ALL_ONLINE },
445 { 0x120868, 1, RI_ALL_ONLINE }, { 0x120870, 1, RI_ALL_ONLINE },
446 { 0x120878, 1, RI_ALL_ONLINE }, { 0x120880, 1, RI_ALL_ONLINE },
447 { 0x120888, 1, RI_ALL_ONLINE }, { 0x120890, 1, RI_ALL_ONLINE },
448 { 0x120898, 1, RI_ALL_ONLINE }, { 0x1208a0, 1, RI_ALL_ONLINE },
449 { 0x1208a8, 1, RI_ALL_ONLINE }, { 0x1208b0, 1, RI_ALL_ONLINE },
450 { 0x1208b8, 1, RI_ALL_ONLINE }, { 0x1208c0, 1, RI_ALL_ONLINE },
451 { 0x1208c8, 1, RI_ALL_ONLINE }, { 0x1208d0, 1, RI_ALL_ONLINE },
452 { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
453 { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
454 { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
455 { 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE },
456 { 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE },
457 { 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
458 { 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE },
459 { 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE },
460 { 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE },
461 { 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE },
462 { 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE },
463 { 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE },
464 { 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE },
465 { 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE },
466 { 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE },
467 { 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE },
468 { 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
469 { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
470 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
471 { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE },
472 { 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE },
473 { 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE },
474 { 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE },
475 { 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE },
476 { 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE },
477 { 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE },
478 { 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
479 { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
480 { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE },
481 { 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE },
482 { 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE },
483 { 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE },
484 { 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE },
485 { 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE },
486 { 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE },
487 { 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE },
488 { 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE },
489 { 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE },
490 { 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE },
491 { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE },
492 { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
493 { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE },
494 { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE },
495 { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE },
496 { 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
497 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
498 { 0x3380c0, 1, RI_ALL_ONLINE }
499};
500
501#define WREGS_COUNT_E1 1
502static const u32 read_reg_e1_0[] = { 0x1b1000 };
503
504static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = {
505 { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
506};
507
508
509#define WREGS_COUNT_E1H 1
510static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
511
512static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = {
513 { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE }
514};
515
516
517static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 };
518
519
520#define TIMER_REGS_COUNT_E1 2
521static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] =
522 { 0x164014, 0x164018 };
523static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] =
524 { 0x1640d0, 0x1640d4 };
525
526
527#define TIMER_REGS_COUNT_E1H 2
528static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] =
529 { 0x164014, 0x164018 };
530static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
531 { 0x1640d0, 0x1640d4 };
532
533
534#endif /* BNX2X_DUMP_H */
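
The tables above drive the register dump: each entry pairs a starting GRC address with a count of consecutive 32-bit registers and an online/offline chip qualifier (the RI_* value). As a rough illustration only, and not part of this patch, the sketch below shows how such a table could be walked; the struct reg_addr field names (addr, size, info), the chip-mask test and the REG_RD() accessor are assumptions taken from the rest of the driver rather than from this hunk.

/* Hypothetical helper: copy every "online" range of one address table into
 * a dump buffer.  Field names and the mask test are assumptions.
 */
static u32 *dump_online_regs(struct bnx2x *bp, u32 *p,
			     const struct reg_addr *tbl, int count,
			     u32 chip_mask)
{
	int i, j;

	for (i = 0; i < count; i++) {
		if (!(tbl[i].info & chip_mask))
			continue;	/* offline or other chip revision */
		for (j = 0; j < tbl[i].size; j++)
			*p++ = REG_RD(bp, tbl[i].addr + j * 4);
	}
	return p;
}

A caller would invoke something like this once per table (the wreg_addrs tables additionally carry their own list of paired read registers), sizing the output buffer from the same size fields.
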
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
new file mode 100644
index 000000000000..08d71bf438d6
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_fw_defs.h
@@ -0,0 +1,594 @@
1/* bnx2x_fw_defs.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9
10
11#define CSTORM_ASSERT_LIST_INDEX_OFFSET \
12 (IS_E1H_OFFSET ? 0x7000 : 0x1000)
13#define CSTORM_ASSERT_LIST_OFFSET(idx) \
14 (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
15#define CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(function, index) \
16 (IS_E1H_OFFSET ? (0x8622 + ((function>>1) * 0x40) + \
17 ((function&1) * 0x100) + (index * 0x4)) : (0x3562 + (function * \
18 0x40) + (index * 0x4)))
19#define CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(function, index) \
20 (IS_E1H_OFFSET ? (0x8822 + ((function>>1) * 0x80) + \
21 ((function&1) * 0x200) + (index * 0x4)) : (0x35e2 + (function * \
22 0x80) + (index * 0x4)))
23#define CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(function) \
24 (IS_E1H_OFFSET ? (0x8600 + ((function>>1) * 0x40) + \
25 ((function&1) * 0x100)) : (0x3540 + (function * 0x40)))
26#define CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(function) \
27 (IS_E1H_OFFSET ? (0x8800 + ((function>>1) * 0x80) + \
28 ((function&1) * 0x200)) : (0x35c0 + (function * 0x80)))
29#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(function) \
30 (IS_E1H_OFFSET ? (0x8608 + ((function>>1) * 0x40) + \
31 ((function&1) * 0x100)) : (0x3548 + (function * 0x40)))
32#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(function) \
33 (IS_E1H_OFFSET ? (0x8808 + ((function>>1) * 0x80) + \
34 ((function&1) * 0x200)) : (0x35c8 + (function * 0x80)))
35#define CSTORM_FUNCTION_MODE_OFFSET \
36 (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff)
37#define CSTORM_HC_BTR_C_OFFSET(port) \
38 (IS_E1H_OFFSET ? (0x8c04 + (port * 0xf0)) : (0x36c4 + (port * 0xc0)))
39#define CSTORM_HC_BTR_U_OFFSET(port) \
40 (IS_E1H_OFFSET ? (0x8de4 + (port * 0xf0)) : (0x3844 + (port * 0xc0)))
41#define CSTORM_ISCSI_CQ_SIZE_OFFSET(function) \
42 (IS_E1H_OFFSET ? (0x6680 + (function * 0x8)) : (0x25a0 + \
43 (function * 0x8)))
44#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \
45 (IS_E1H_OFFSET ? (0x66c0 + (function * 0x8)) : (0x25b0 + \
46 (function * 0x8)))
47#define CSTORM_ISCSI_EQ_CONS_OFFSET(function, eqIdx) \
48 (IS_E1H_OFFSET ? (0x6040 + (function * 0xc0) + (eqIdx * 0x18)) : \
49 (0x2410 + (function * 0xc0) + (eqIdx * 0x18)))
50#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(function, eqIdx) \
51 (IS_E1H_OFFSET ? (0x6044 + (function * 0xc0) + (eqIdx * 0x18)) : \
52 (0x2414 + (function * 0xc0) + (eqIdx * 0x18)))
53#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(function, eqIdx) \
54 (IS_E1H_OFFSET ? (0x604c + (function * 0xc0) + (eqIdx * 0x18)) : \
55 (0x241c + (function * 0xc0) + (eqIdx * 0x18)))
56#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(function, eqIdx) \
57 (IS_E1H_OFFSET ? (0x6057 + (function * 0xc0) + (eqIdx * 0x18)) : \
58 (0x2427 + (function * 0xc0) + (eqIdx * 0x18)))
59#define CSTORM_ISCSI_EQ_PROD_OFFSET(function, eqIdx) \
60 (IS_E1H_OFFSET ? (0x6042 + (function * 0xc0) + (eqIdx * 0x18)) : \
61 (0x2412 + (function * 0xc0) + (eqIdx * 0x18)))
62#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(function, eqIdx) \
63 (IS_E1H_OFFSET ? (0x6056 + (function * 0xc0) + (eqIdx * 0x18)) : \
64 (0x2426 + (function * 0xc0) + (eqIdx * 0x18)))
65#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(function, eqIdx) \
66 (IS_E1H_OFFSET ? (0x6054 + (function * 0xc0) + (eqIdx * 0x18)) : \
67 (0x2424 + (function * 0xc0) + (eqIdx * 0x18)))
68#define CSTORM_ISCSI_HQ_SIZE_OFFSET(function) \
69 (IS_E1H_OFFSET ? (0x6640 + (function * 0x8)) : (0x2590 + \
70 (function * 0x8)))
71#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
72 (IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x2404 + \
73 (function * 0x8)))
74#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
75 (IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x2402 + \
76 (function * 0x8)))
77#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
78 (IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x2400 + \
79 (function * 0x8)))
80#define CSTORM_SB_HC_DISABLE_C_OFFSET(port, cpu_id, index) \
81 (IS_E1H_OFFSET ? (0x811a + (port * 0x280) + (cpu_id * 0x28) + \
82 (index * 0x4)) : (0x305a + (port * 0x280) + (cpu_id * 0x28) + \
83 (index * 0x4)))
84#define CSTORM_SB_HC_DISABLE_U_OFFSET(port, cpu_id, index) \
85 (IS_E1H_OFFSET ? (0xb01a + (port * 0x800) + (cpu_id * 0x80) + \
86 (index * 0x4)) : (0x401a + (port * 0x800) + (cpu_id * 0x80) + \
87 (index * 0x4)))
88#define CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, cpu_id, index) \
89 (IS_E1H_OFFSET ? (0x8118 + (port * 0x280) + (cpu_id * 0x28) + \
90 (index * 0x4)) : (0x3058 + (port * 0x280) + (cpu_id * 0x28) + \
91 (index * 0x4)))
92#define CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, cpu_id, index) \
93 (IS_E1H_OFFSET ? (0xb018 + (port * 0x800) + (cpu_id * 0x80) + \
94 (index * 0x4)) : (0x4018 + (port * 0x800) + (cpu_id * 0x80) + \
95 (index * 0x4)))
96#define CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, cpu_id) \
97 (IS_E1H_OFFSET ? (0x8100 + (port * 0x280) + (cpu_id * 0x28)) : \
98 (0x3040 + (port * 0x280) + (cpu_id * 0x28)))
99#define CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, cpu_id) \
100 (IS_E1H_OFFSET ? (0xb000 + (port * 0x800) + (cpu_id * 0x80)) : \
101 (0x4000 + (port * 0x800) + (cpu_id * 0x80)))
102#define CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, cpu_id) \
103 (IS_E1H_OFFSET ? (0x8108 + (port * 0x280) + (cpu_id * 0x28)) : \
104 (0x3048 + (port * 0x280) + (cpu_id * 0x28)))
105#define CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, cpu_id) \
106 (IS_E1H_OFFSET ? (0xb008 + (port * 0x800) + (cpu_id * 0x80)) : \
107 (0x4008 + (port * 0x800) + (cpu_id * 0x80)))
108#define CSTORM_SB_STATUS_BLOCK_C_SIZE 0x10
109#define CSTORM_SB_STATUS_BLOCK_U_SIZE 0x60
110#define CSTORM_STATS_FLAGS_OFFSET(function) \
111 (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
112 (function * 0x8)))
113#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
114 (IS_E1H_OFFSET ? (0x3200 + (function * 0x20)) : 0xffffffff)
115#define TSTORM_ASSERT_LIST_INDEX_OFFSET \
116 (IS_E1H_OFFSET ? 0xa000 : 0x1000)
117#define TSTORM_ASSERT_LIST_OFFSET(idx) \
118 (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
119#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
120 (IS_E1H_OFFSET ? (0x33a0 + (port * 0x1a0) + (client_id * 0x10)) \
121 : (0x9c0 + (port * 0x120) + (client_id * 0x10)))
122#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET \
123 (IS_E1H_OFFSET ? 0x1ed8 : 0xffffffff)
124#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
125 (IS_E1H_OFFSET ? 0x1eda : 0xffffffff)
126#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
127 (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \
128 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
129 0x28) + (index * 0x4)))
130#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
131 (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \
132 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
133#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
134 (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \
135 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
136#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
137 (IS_E1H_OFFSET ? (0x2940 + (function * 0x8)) : (0x4928 + \
138 (function * 0x8)))
139#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \
140 (IS_E1H_OFFSET ? (0x3000 + (function * 0x40)) : (0x1500 + \
141 (function * 0x40)))
142#define TSTORM_FUNCTION_MODE_OFFSET \
143 (IS_E1H_OFFSET ? 0x1ed0 : 0xffffffff)
144#define TSTORM_HC_BTR_OFFSET(port) \
145 (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
146#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \
147 (IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \
148 (function * 0x80)))
149#define TSTORM_INDIRECTION_TABLE_SIZE 0x80
150#define TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(function, pblEntry) \
151 (IS_E1H_OFFSET ? (0x60c0 + (function * 0x40) + (pblEntry * 0x8)) \
152 : (0x4c30 + (function * 0x40) + (pblEntry * 0x8)))
153#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \
154 (IS_E1H_OFFSET ? (0x6340 + (function * 0x8)) : (0x4cd0 + \
155 (function * 0x8)))
156#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
157 (IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x4c04 + \
158 (function * 0x8)))
159#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
160 (IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x4c02 + \
161 (function * 0x8)))
162#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
163 (IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x4c00 + \
164 (function * 0x8)))
165#define TSTORM_ISCSI_RQ_SIZE_OFFSET(function) \
166 (IS_E1H_OFFSET ? (0x6080 + (function * 0x8)) : (0x4c20 + \
167 (function * 0x8)))
168#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \
169 (IS_E1H_OFFSET ? (0x6040 + (function * 0x8)) : (0x4c10 + \
170 (function * 0x8)))
171#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(function) \
172 (IS_E1H_OFFSET ? (0x6042 + (function * 0x8)) : (0x4c12 + \
173 (function * 0x8)))
174#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(function) \
175 (IS_E1H_OFFSET ? (0x6044 + (function * 0x8)) : (0x4c14 + \
176 (function * 0x8)))
177#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \
178 (IS_E1H_OFFSET ? (0x3008 + (function * 0x40)) : (0x1508 + \
179 (function * 0x40)))
180#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
181 (IS_E1H_OFFSET ? (0x2010 + (port * 0x490) + (stats_counter_id * \
182 0x40)) : (0x4010 + (port * 0x490) + (stats_counter_id * 0x40)))
183#define TSTORM_STATS_FLAGS_OFFSET(function) \
184 (IS_E1H_OFFSET ? (0x29c0 + (function * 0x8)) : (0x4948 + \
185 (function * 0x8)))
186#define TSTORM_TCP_MAX_CWND_OFFSET(function) \
187 (IS_E1H_OFFSET ? (0x4004 + (function * 0x8)) : (0x1fb4 + \
188 (function * 0x8)))
189#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa000 : 0x3000)
190#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2000 : 0x1000)
191#define USTORM_ASSERT_LIST_INDEX_OFFSET \
192 (IS_E1H_OFFSET ? 0x8000 : 0x1000)
193#define USTORM_ASSERT_LIST_OFFSET(idx) \
194 (IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
195#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \
196 (IS_E1H_OFFSET ? (0x1010 + (port * 0x680) + (clientId * 0x40)) : \
197 (0x4010 + (port * 0x360) + (clientId * 0x30)))
198#define USTORM_CQE_PAGE_NEXT_OFFSET(port, clientId) \
199 (IS_E1H_OFFSET ? (0x1028 + (port * 0x680) + (clientId * 0x40)) : \
200 (0x4028 + (port * 0x360) + (clientId * 0x30)))
201#define USTORM_ETH_PAUSE_ENABLED_OFFSET(port) \
202 (IS_E1H_OFFSET ? (0x2ad4 + (port * 0x8)) : 0xffffffff)
203#define USTORM_ETH_RING_PAUSE_DATA_OFFSET(port, clientId) \
204 (IS_E1H_OFFSET ? (0x1030 + (port * 0x680) + (clientId * 0x40)) : \
205 0xffffffff)
206#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
207 (IS_E1H_OFFSET ? (0x2a50 + (function * 0x8)) : (0x1dd0 + \
208 (function * 0x8)))
209#define USTORM_FUNCTION_MODE_OFFSET \
210 (IS_E1H_OFFSET ? 0x2448 : 0xffffffff)
211#define USTORM_ISCSI_CQ_SIZE_OFFSET(function) \
212 (IS_E1H_OFFSET ? (0x7044 + (function * 0x8)) : (0x2414 + \
213 (function * 0x8)))
214#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \
215 (IS_E1H_OFFSET ? (0x7046 + (function * 0x8)) : (0x2416 + \
216 (function * 0x8)))
217#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \
218 (IS_E1H_OFFSET ? (0x7688 + (function * 0x8)) : (0x29c8 + \
219 (function * 0x8)))
220#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(function) \
221 (IS_E1H_OFFSET ? (0x7648 + (function * 0x8)) : (0x29b8 + \
222 (function * 0x8)))
223#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
224 (IS_E1H_OFFSET ? (0x7004 + (function * 0x8)) : (0x2404 + \
225 (function * 0x8)))
226#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
227 (IS_E1H_OFFSET ? (0x7002 + (function * 0x8)) : (0x2402 + \
228 (function * 0x8)))
229#define USTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
230 (IS_E1H_OFFSET ? (0x7000 + (function * 0x8)) : (0x2400 + \
231 (function * 0x8)))
232#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \
233 (IS_E1H_OFFSET ? (0x7040 + (function * 0x8)) : (0x2410 + \
234 (function * 0x8)))
235#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(function) \
236 (IS_E1H_OFFSET ? (0x7080 + (function * 0x8)) : (0x2420 + \
237 (function * 0x8)))
238#define USTORM_ISCSI_RQ_SIZE_OFFSET(function) \
239 (IS_E1H_OFFSET ? (0x7084 + (function * 0x8)) : (0x2424 + \
240 (function * 0x8)))
241#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \
242 (IS_E1H_OFFSET ? (0x1018 + (port * 0x680) + (clientId * 0x40)) : \
243 (0x4018 + (port * 0x360) + (clientId * 0x30)))
244#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \
245 (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x1da8 + \
246 (function * 0x8)))
247#define USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
248 (IS_E1H_OFFSET ? (0x2450 + (port * 0x2d0) + (stats_counter_id * \
249 0x28)) : (0x1500 + (port * 0x2d0) + (stats_counter_id * 0x28)))
250#define USTORM_RX_PRODS_OFFSET(port, client_id) \
251 (IS_E1H_OFFSET ? (0x1000 + (port * 0x680) + (client_id * 0x40)) \
252 : (0x4000 + (port * 0x360) + (client_id * 0x30)))
253#define USTORM_STATS_FLAGS_OFFSET(function) \
254 (IS_E1H_OFFSET ? (0x29f0 + (function * 0x8)) : (0x1db8 + \
255 (function * 0x8)))
256#define USTORM_TPA_BTR_OFFSET (IS_E1H_OFFSET ? 0x3da5 : 0x5095)
257#define USTORM_TPA_BTR_SIZE 0x1
258#define XSTORM_ASSERT_LIST_INDEX_OFFSET \
259 (IS_E1H_OFFSET ? 0x9000 : 0x1000)
260#define XSTORM_ASSERT_LIST_OFFSET(idx) \
261 (IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
262#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \
263 (IS_E1H_OFFSET ? (0x24a8 + (port * 0x50)) : (0x3a80 + (port * 0x50)))
264#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
265 (IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \
266 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
267 0x28) + (index * 0x4)))
268#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
269 (IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \
270 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
271#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
272 (IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \
273 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
274#define XSTORM_E1HOV_OFFSET(function) \
275 (IS_E1H_OFFSET ? (0x2c10 + (function * 0x8)) : 0xffffffff)
276#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
277 (IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3a50 + \
278 (function * 0x8)))
279#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \
280 (IS_E1H_OFFSET ? (0x2588 + (function * 0x90)) : (0x3b60 + \
281 (function * 0x90)))
282#define XSTORM_FUNCTION_MODE_OFFSET \
283 (IS_E1H_OFFSET ? 0x2c50 : 0xffffffff)
284#define XSTORM_HC_BTR_OFFSET(port) \
285 (IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
286#define XSTORM_ISCSI_HQ_SIZE_OFFSET(function) \
287 (IS_E1H_OFFSET ? (0x80c0 + (function * 0x8)) : (0x1c30 + \
288 (function * 0x8)))
289#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(function) \
290 (IS_E1H_OFFSET ? (0x8080 + (function * 0x8)) : (0x1c20 + \
291 (function * 0x8)))
292#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(function) \
293 (IS_E1H_OFFSET ? (0x8081 + (function * 0x8)) : (0x1c21 + \
294 (function * 0x8)))
295#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(function) \
296 (IS_E1H_OFFSET ? (0x8082 + (function * 0x8)) : (0x1c22 + \
297 (function * 0x8)))
298#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(function) \
299 (IS_E1H_OFFSET ? (0x8083 + (function * 0x8)) : (0x1c23 + \
300 (function * 0x8)))
301#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(function) \
302 (IS_E1H_OFFSET ? (0x8084 + (function * 0x8)) : (0x1c24 + \
303 (function * 0x8)))
304#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(function) \
305 (IS_E1H_OFFSET ? (0x8085 + (function * 0x8)) : (0x1c25 + \
306 (function * 0x8)))
307#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(function) \
308 (IS_E1H_OFFSET ? (0x8086 + (function * 0x8)) : (0x1c26 + \
309 (function * 0x8)))
310#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
311 (IS_E1H_OFFSET ? (0x8004 + (function * 0x8)) : (0x1c04 + \
312 (function * 0x8)))
313#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
314 (IS_E1H_OFFSET ? (0x8002 + (function * 0x8)) : (0x1c02 + \
315 (function * 0x8)))
316#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
317 (IS_E1H_OFFSET ? (0x8000 + (function * 0x8)) : (0x1c00 + \
318 (function * 0x8)))
319#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \
320 (IS_E1H_OFFSET ? (0x80c4 + (function * 0x8)) : (0x1c34 + \
321 (function * 0x8)))
322#define XSTORM_ISCSI_SQ_SIZE_OFFSET(function) \
323 (IS_E1H_OFFSET ? (0x80c2 + (function * 0x8)) : (0x1c32 + \
324 (function * 0x8)))
325#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(function) \
326 (IS_E1H_OFFSET ? (0x8043 + (function * 0x8)) : (0x1c13 + \
327 (function * 0x8)))
328#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \
329 (IS_E1H_OFFSET ? (0x8042 + (function * 0x8)) : (0x1c12 + \
330 (function * 0x8)))
331#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(function) \
332 (IS_E1H_OFFSET ? (0x8041 + (function * 0x8)) : (0x1c11 + \
333 (function * 0x8)))
334#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(function) \
335 (IS_E1H_OFFSET ? (0x8040 + (function * 0x8)) : (0x1c10 + \
336 (function * 0x8)))
337#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
338 (IS_E1H_OFFSET ? (0xc000 + (port * 0x360) + (stats_counter_id * \
339 0x30)) : (0x3378 + (port * 0x360) + (stats_counter_id * 0x30)))
340#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
341 (IS_E1H_OFFSET ? (0x2548 + (function * 0x90)) : (0x3b20 + \
342 (function * 0x90)))
343#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
344 (IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
345 (function * 0x10)))
346#define XSTORM_SPQ_PROD_OFFSET(function) \
347 (IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
348 (function * 0x10)))
349#define XSTORM_STATS_FLAGS_OFFSET(function) \
350 (IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3a40 + \
351 (function * 0x8)))
352#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port) \
353 (IS_E1H_OFFSET ? (0x4000 + (port * 0x8)) : (0x1960 + (port * 0x8)))
354#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port) \
355 (IS_E1H_OFFSET ? (0x4001 + (port * 0x8)) : (0x1961 + (port * 0x8)))
356#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(function) \
357 (IS_E1H_OFFSET ? (0x4060 + ((function>>1) * 0x8) + ((function&1) \
358 * 0x4)) : (0x1978 + (function * 0x4)))
359#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
360
361/**
362 * This file defines HSI constants for the ETH flow
363 */
364#ifdef _EVEREST_MICROCODE
365#include "microcode_constants.h"
366#include "eth_rx_bd.h"
367#include "eth_tx_bd.h"
368#include "eth_rx_cqe.h"
369#include "eth_rx_sge.h"
370#include "eth_rx_cqe_next_page.h"
371#endif
372
373/* RSS hash types */
374#define DEFAULT_HASH_TYPE 0
375#define IPV4_HASH_TYPE 1
376#define TCP_IPV4_HASH_TYPE 2
377#define IPV6_HASH_TYPE 3
378#define TCP_IPV6_HASH_TYPE 4
379#define VLAN_PRI_HASH_TYPE 5
380#define E1HOV_PRI_HASH_TYPE 6
381#define DSCP_HASH_TYPE 7
382
383
384/* Ethernet Ring parameters */
385#define X_ETH_LOCAL_RING_SIZE 13
386#define FIRST_BD_IN_PKT 0
387#define PARSE_BD_INDEX 1
388#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
389#define U_ETH_NUM_OF_SGES_TO_FETCH 8
390#define U_ETH_MAX_SGES_FOR_PACKET 3
391
392/* Rx ring params */
393#define U_ETH_LOCAL_BD_RING_SIZE 8
394#define U_ETH_LOCAL_SGE_RING_SIZE 10
395#define U_ETH_SGL_SIZE 8
396
397
398#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
399 (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
400
401#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
402#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
403#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
404
405#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
406#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
407#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
408
409#define U_ETH_UNDEFINED_Q 0xFF
410
411/* values of command IDs in the ramrod message */
412#define RAMROD_CMD_ID_ETH_PORT_SETUP 80
413#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 85
414#define RAMROD_CMD_ID_ETH_STAT_QUERY 90
415#define RAMROD_CMD_ID_ETH_UPDATE 100
416#define RAMROD_CMD_ID_ETH_HALT 105
417#define RAMROD_CMD_ID_ETH_SET_MAC 110
418#define RAMROD_CMD_ID_ETH_CFC_DEL 115
419#define RAMROD_CMD_ID_ETH_PORT_DEL 120
420#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 125
421
422
423/* command values for set mac command */
424#define T_ETH_MAC_COMMAND_SET 0
425#define T_ETH_MAC_COMMAND_INVALIDATE 1
426
427#define T_ETH_INDIRECTION_TABLE_SIZE 128
428
429/* The CRC32 seed used for the multicast address hash (reduction) */
430#define T_ETH_CRC32_HASH_SEED 0x00000000
431
432/* Maximal L2 clients supported */
433#define ETH_MAX_RX_CLIENTS_E1 18
434#define ETH_MAX_RX_CLIENTS_E1H 26
435
436/* Maximal aggregation queues supported */
437#define ETH_MAX_AGGREGATION_QUEUES_E1 32
438#define ETH_MAX_AGGREGATION_QUEUES_E1H 64
439
440/* ETH RSS modes */
441#define ETH_RSS_MODE_DISABLED 0
442#define ETH_RSS_MODE_REGULAR 1
443#define ETH_RSS_MODE_VLAN_PRI 2
444#define ETH_RSS_MODE_E1HOV_PRI 3
445#define ETH_RSS_MODE_IP_DSCP 4
446
447
448/**
449 * This file defines HSI constants common to all microcode flows
450 */
451
452/* Connection types */
453#define ETH_CONNECTION_TYPE 0
454#define TOE_CONNECTION_TYPE 1
455#define RDMA_CONNECTION_TYPE 2
456#define ISCSI_CONNECTION_TYPE 3
457#define FCOE_CONNECTION_TYPE 4
458#define RESERVED_CONNECTION_TYPE_0 5
459#define RESERVED_CONNECTION_TYPE_1 6
460#define RESERVED_CONNECTION_TYPE_2 7
461
462
463#define PROTOCOL_STATE_BIT_OFFSET 6
464
465#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
466#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
467#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
468
469/* microcode fixed page size of 4K (chains and ring segments) */
470#define MC_PAGE_SIZE 4096
471
472
473/* Host coalescing constants */
474#define HC_IGU_BC_MODE 0
475#define HC_IGU_NBC_MODE 1
476
477#define HC_REGULAR_SEGMENT 0
478#define HC_DEFAULT_SEGMENT 1
479
480/* index numbers */
481#define HC_USTORM_DEF_SB_NUM_INDICES 8
482#define HC_CSTORM_DEF_SB_NUM_INDICES 8
483#define HC_XSTORM_DEF_SB_NUM_INDICES 4
484#define HC_TSTORM_DEF_SB_NUM_INDICES 4
485#define HC_USTORM_SB_NUM_INDICES 4
486#define HC_CSTORM_SB_NUM_INDICES 4
487
488/* index values - which counter to update */
489
490#define HC_INDEX_U_TOE_RX_CQ_CONS 0
491#define HC_INDEX_U_ETH_RX_CQ_CONS 1
492#define HC_INDEX_U_ETH_RX_BD_CONS 2
493#define HC_INDEX_U_FCOE_EQ_CONS 3
494
495#define HC_INDEX_C_TOE_TX_CQ_CONS 0
496#define HC_INDEX_C_ETH_TX_CQ_CONS 1
497#define HC_INDEX_C_ISCSI_EQ_CONS 2
498
499#define HC_INDEX_DEF_X_SPQ_CONS 0
500
501#define HC_INDEX_DEF_C_RDMA_EQ_CONS 0
502#define HC_INDEX_DEF_C_RDMA_NAL_PROD 1
503#define HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS 2
504#define HC_INDEX_DEF_C_ETH_SLOW_PATH 3
505#define HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS 4
506#define HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS 5
507#define HC_INDEX_DEF_C_ETH_FCOE_CQ_CONS 6
508
509#define HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS 0
510#define HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS 1
511#define HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS 2
512#define HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS 3
513#define HC_INDEX_DEF_U_ETH_FCOE_RX_CQ_CONS 4
514#define HC_INDEX_DEF_U_ETH_FCOE_RX_BD_CONS 5
515
516/* used by the driver to get the SB offset */
517#define USTORM_ID 0
518#define CSTORM_ID 1
519#define XSTORM_ID 2
520#define TSTORM_ID 3
521#define ATTENTION_ID 4
522
523/* max number of slow path commands per port */
524#define MAX_RAMRODS_PER_PORT 8
525
526/* values for RX ETH CQE type field */
527#define RX_ETH_CQE_TYPE_ETH_FASTPATH 0
528#define RX_ETH_CQE_TYPE_ETH_RAMROD 1
529
530
531/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
532#define EMULATION_FREQUENCY_FACTOR 1600
533#define FPGA_FREQUENCY_FACTOR 100
534
535#define TIMERS_TICK_SIZE_CHIP (1e-3)
536#define TIMERS_TICK_SIZE_EMUL \
537 ((TIMERS_TICK_SIZE_CHIP)/((EMULATION_FREQUENCY_FACTOR)))
538#define TIMERS_TICK_SIZE_FPGA \
539 ((TIMERS_TICK_SIZE_CHIP)/((FPGA_FREQUENCY_FACTOR)))
540
541#define TSEMI_CLK1_RESUL_CHIP (1e-3)
542#define TSEMI_CLK1_RESUL_EMUL \
543 ((TSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
544#define TSEMI_CLK1_RESUL_FPGA \
545 ((TSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
546
547#define USEMI_CLK1_RESUL_CHIP (TIMERS_TICK_SIZE_CHIP)
548#define USEMI_CLK1_RESUL_EMUL (TIMERS_TICK_SIZE_EMUL)
549#define USEMI_CLK1_RESUL_FPGA (TIMERS_TICK_SIZE_FPGA)
550
551#define XSEMI_CLK1_RESUL_CHIP (1e-3)
552#define XSEMI_CLK1_RESUL_EMUL \
553 ((XSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
554#define XSEMI_CLK1_RESUL_FPGA \
555 ((XSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
556
557#define XSEMI_CLK2_RESUL_CHIP (1e-6)
558#define XSEMI_CLK2_RESUL_EMUL \
559 ((XSEMI_CLK2_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
560#define XSEMI_CLK2_RESUL_FPGA \
561 ((XSEMI_CLK2_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
562
563#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6))
564#define SDM_TIMER_TICK_RESUL_EMUL \
565 ((SDM_TIMER_TICK_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
566#define SDM_TIMER_TICK_RESUL_FPGA \
567 ((SDM_TIMER_TICK_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
568
569
570/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
571#define XSTORM_IP_ID_ROLL_HALF 0x8000
572#define XSTORM_IP_ID_ROLL_ALL 0
573
574#define FW_LOG_LIST_SIZE 50
575
576#define NUM_OF_PROTOCOLS 4
577#define NUM_OF_SAFC_BITS 16
578#define MAX_COS_NUMBER 4
579#define MAX_T_STAT_COUNTER_ID 18
580#define MAX_X_STAT_COUNTER_ID 18
581#define MAX_U_STAT_COUNTER_ID 18
582
583
584#define UNKNOWN_ADDRESS 0
585#define UNICAST_ADDRESS 1
586#define MULTICAST_ADDRESS 2
587#define BROADCAST_ADDRESS 3
588
589#define SINGLE_FUNCTION 0
590#define MULTI_FUNCTION 1
591
592#define IP_V4 0
593#define IP_V6 1
594
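
All of the *_OFFSET macros above resolve to byte offsets inside a STORM's internal memory, with IS_E1H_OFFSET (defined elsewhere in the driver) selecting between the E1 and E1H layouts. A minimal, hedged sketch of how such an offset is typically consumed follows; BAR_XSTRORM_INTMEM and REG_WR() are assumed to be provided by the driver headers and are not defined in this hunk.

/* Hypothetical example: publish a new slow-path queue producer value at the
 * XSTORM offset computed by the macro above.  Base constant and accessor
 * are assumptions.
 */
static void sketch_update_spq_prod(struct bnx2x *bp, int func, u32 prod)
{
	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func);

	REG_WR(bp, addr, prod);
}
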
diff --git a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
new file mode 100644
index 000000000000..3f5ee5d7cc2a
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
@@ -0,0 +1,37 @@
1/* bnx2x_fw_file_hdr.h: FW binary file header structure.
2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Vladislav Zolotarov <vladz@broadcom.com>
11 * Based on the original idea of John Wright <john.wright@hp.com>.
12 */
13
14#ifndef BNX2X_INIT_FILE_HDR_H
15#define BNX2X_INIT_FILE_HDR_H
16
17struct bnx2x_fw_file_section {
18 __be32 len;
19 __be32 offset;
20};
21
22struct bnx2x_fw_file_hdr {
23 struct bnx2x_fw_file_section init_ops;
24 struct bnx2x_fw_file_section init_ops_offsets;
25 struct bnx2x_fw_file_section init_data;
26 struct bnx2x_fw_file_section tsem_int_table_data;
27 struct bnx2x_fw_file_section tsem_pram_data;
28 struct bnx2x_fw_file_section usem_int_table_data;
29 struct bnx2x_fw_file_section usem_pram_data;
30 struct bnx2x_fw_file_section csem_int_table_data;
31 struct bnx2x_fw_file_section csem_pram_data;
32 struct bnx2x_fw_file_section xsem_int_table_data;
33 struct bnx2x_fw_file_section xsem_pram_data;
34 struct bnx2x_fw_file_section fw_version;
35};
36
37#endif /* BNX2X_INIT_FILE_HDR_H */
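
The header above sits at the start of the firmware blob the driver requests at load time; each bnx2x_fw_file_section records a big-endian length and offset into that same blob. The sketch below only illustrates how those __be32 fields are meant to be decoded, using the kernel's struct firmware and be32_to_cpu(); the bounds check is an example, not the driver's actual validation.

/* Hypothetical helper: resolve one section of the firmware image. */
static const u8 *sketch_fw_section(const struct firmware *fw,
				   const struct bnx2x_fw_file_section *sec,
				   u32 *len)
{
	u32 off = be32_to_cpu(sec->offset);

	*len = be32_to_cpu(sec->len);
	if (off > fw->size || *len > fw->size - off)
		return NULL;	/* malformed or truncated firmware file */
	return fw->data + off;
}
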
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
new file mode 100644
index 000000000000..fd1f29e0317d
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -0,0 +1,3138 @@
1/* bnx2x_hsi.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9
10struct license_key {
11 u32 reserved[6];
12
13#if defined(__BIG_ENDIAN)
14 u16 max_iscsi_init_conn;
15 u16 max_iscsi_trgt_conn;
16#elif defined(__LITTLE_ENDIAN)
17 u16 max_iscsi_trgt_conn;
18 u16 max_iscsi_init_conn;
19#endif
20
21 u32 reserved_a[6];
22};
23
24
25#define PORT_0 0
26#define PORT_1 1
27#define PORT_MAX 2
28
29/****************************************************************************
30 * Shared HW configuration *
31 ****************************************************************************/
32struct shared_hw_cfg { /* NVRAM Offset */
33 /* Up to 16 bytes of NULL-terminated string */
34 u8 part_num[16]; /* 0x104 */
35
36 u32 config; /* 0x114 */
37#define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001
38#define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0
39#define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000
40#define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001
41#define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002
42
43#define SHARED_HW_CFG_PORT_SWAP 0x00000004
44
45#define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008
46
47#define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700
48#define SHARED_HW_CFG_MFW_SELECT_SHIFT 8
49 /* Whichever MFW is found in NVM
50 (if multiple are found, the priority order is: NC-SI, UMP, IPMI) */
51#define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000
52#define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100
53#define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200
54#define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300
55 /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI
56 (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
57#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400
58 /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI
59 (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
60#define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500
61 /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP
62 (can only be used when an add-in board, not BMC, pulls-down SPIO4) */
63#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600
64
65#define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000
66#define SHARED_HW_CFG_LED_MODE_SHIFT 16
67#define SHARED_HW_CFG_LED_MAC1 0x00000000
68#define SHARED_HW_CFG_LED_PHY1 0x00010000
69#define SHARED_HW_CFG_LED_PHY2 0x00020000
70#define SHARED_HW_CFG_LED_PHY3 0x00030000
71#define SHARED_HW_CFG_LED_MAC2 0x00040000
72#define SHARED_HW_CFG_LED_PHY4 0x00050000
73#define SHARED_HW_CFG_LED_PHY5 0x00060000
74#define SHARED_HW_CFG_LED_PHY6 0x00070000
75#define SHARED_HW_CFG_LED_MAC3 0x00080000
76#define SHARED_HW_CFG_LED_PHY7 0x00090000
77#define SHARED_HW_CFG_LED_PHY9 0x000a0000
78#define SHARED_HW_CFG_LED_PHY11 0x000b0000
79#define SHARED_HW_CFG_LED_MAC4 0x000c0000
80#define SHARED_HW_CFG_LED_PHY8 0x000d0000
81
82#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
83#define SHARED_HW_CFG_AN_ENABLE_SHIFT 24
84#define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000
85#define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000
86#define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000
87#define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000
88#define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000
89#define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000
90
91 u32 config2; /* 0x118 */
92 /* one-time auto-detect grace period (in seconds) */
93#define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff
94#define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0
95
96#define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100
97
98 /* The default value for the core clock is 250MHz and it is
99 achieved by setting the clock change to 4 */
100#define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00
101#define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9
102
103#define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000
104#define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000
105
106#define SHARED_HW_CFG_HIDE_PORT1 0x00002000
107
108 /* The fan failure mechanism is usually related to the PHY type
109 since the power consumption of the board is determined by the PHY.
110 Currently, a fan is required for most designs with SFX7101, BCM8727
111 and BCM8481. If a fan is not required for a board which uses one
112 of those PHYs, this field should be set to "Disabled". If a fan is
113 required for a different PHY type, this option should be set to
114 "Enabled".
115 The fan failure indication is expected on
116 SPIO5 */
117#define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000
118#define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19
119#define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000
120#define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
121#define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
122
123 u32 power_dissipated; /* 0x11c */
124#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
125#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24
126
127#define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000
128#define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16
129#define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000
130#define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000
131#define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000
132#define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000
133
134 u32 ump_nc_si_config; /* 0x120 */
135#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003
136#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0
137#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000
138#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001
139#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000
140#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002
141
142#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00
143#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8
144
145#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000
146#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16
147#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000
148#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000
149
150 u32 board; /* 0x124 */
151#define SHARED_HW_CFG_BOARD_REV_MASK 0x00FF0000
152#define SHARED_HW_CFG_BOARD_REV_SHIFT 16
153
154#define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0F000000
155#define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24
156
157#define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xF0000000
158#define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28
159
160 u32 reserved; /* 0x128 */
161
162};
163
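
Each config word in shared_hw_cfg is decoded with the matching _MASK/_SHIFT (or direct value) defines above. As a hedged illustration, the snippet below evaluates the fan-failure policy described in the comment on config2; how config2 itself is read from NVRAM/shared memory is not shown in this hunk and is left out here as well.

/* Hypothetical decode of the fan-failure field of shared_hw_cfg.config2. */
static bool sketch_fan_failure_required(u32 config2, bool phy_needs_fan)
{
	switch (config2 & SHARED_HW_CFG_FAN_FAILURE_MASK) {
	case SHARED_HW_CFG_FAN_FAILURE_ENABLED:
		return true;
	case SHARED_HW_CFG_FAN_FAILURE_DISABLED:
		return false;
	default:	/* SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE */
		return phy_needs_fan;	/* decision depends on the PHY */
	}
}
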
164
165/****************************************************************************
166 * Port HW configuration *
167 ****************************************************************************/
168struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
169
170 u32 pci_id;
171#define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000
172#define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff
173
174 u32 pci_sub_id;
175#define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000
176#define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff
177
178 u32 power_dissipated;
179#define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000
180#define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24
181#define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000
182#define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16
183#define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00
184#define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8
185#define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff
186#define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0
187
188 u32 power_consumed;
189#define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000
190#define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24
191#define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000
192#define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16
193#define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00
194#define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8
195#define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff
196#define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0
197
198 u32 mac_upper;
199#define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff
200#define PORT_HW_CFG_UPPERMAC_SHIFT 0
201 u32 mac_lower;
202
203 u32 iscsi_mac_upper; /* Upper 16 bits are always zeroes */
204 u32 iscsi_mac_lower;
205
206 u32 rdma_mac_upper; /* Upper 16 bits are always zeroes */
207 u32 rdma_mac_lower;
208
209 u32 serdes_config;
210#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000FFFF
211#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0
212
213#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xFFFF0000
214#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
215
216
217 u32 Reserved0[16]; /* 0x158 */
218
219 /* for external PHY, or forced mode or during AN */
220 u16 xgxs_config_rx[4]; /* 0x198 */
221
222 u16 xgxs_config_tx[4]; /* 0x1A0 */
223
224 u32 Reserved1[64]; /* 0x1A8 */
225
226 u32 lane_config;
227#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
228#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0
229#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff
230#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
231#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00
232#define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
233#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000
234#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14
235 /* AN and forced */
236#define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b
237 /* forced only */
238#define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4
239 /* forced only */
240#define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
241 /* forced only */
242#define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
243
244 u32 external_phy_config;
245#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000
246#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24
247#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000
248#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000
249#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000
250
251#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000
252#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16
253
254#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00
255#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8
256#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000
257#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100
258#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200
259#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300
260#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400
261#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500
262#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600
263#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700
264#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800
265#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
266#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
267#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
268#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
269#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
270
271#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff
272#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0
273
274 u32 speed_capability_mask;
275#define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000
276#define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16
277#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000
278#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000
279#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000
280#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000
281#define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000
282#define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000
283#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000
284#define PORT_HW_CFG_SPEED_CAPABILITY_D0_12G 0x00800000
285#define PORT_HW_CFG_SPEED_CAPABILITY_D0_12_5G 0x01000000
286#define PORT_HW_CFG_SPEED_CAPABILITY_D0_13G 0x02000000
287#define PORT_HW_CFG_SPEED_CAPABILITY_D0_15G 0x04000000
288#define PORT_HW_CFG_SPEED_CAPABILITY_D0_16G 0x08000000
289#define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000
290
291#define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff
292#define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0
293#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001
294#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002
295#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004
296#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008
297#define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010
298#define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020
299#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040
300#define PORT_HW_CFG_SPEED_CAPABILITY_D3_12G 0x00000080
301#define PORT_HW_CFG_SPEED_CAPABILITY_D3_12_5G 0x00000100
302#define PORT_HW_CFG_SPEED_CAPABILITY_D3_13G 0x00000200
303#define PORT_HW_CFG_SPEED_CAPABILITY_D3_15G 0x00000400
304#define PORT_HW_CFG_SPEED_CAPABILITY_D3_16G 0x00000800
305#define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000
306
307 u32 reserved[2];
308
309};
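/* Illustrative sketch, not part of the original commit: the external PHY
 * type constants above are already positioned inside their field, so they
 * are compared after masking only, without a shift.  A hypothetical check
 * on a value read from external_phy_config:
 */
static inline int example_xgxs_phy_is_direct(u32 ext_phy_config)
{
	return (ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
	       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT;
}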
310
311
312/****************************************************************************
313 * Shared Feature configuration *
314 ****************************************************************************/
315struct shared_feat_cfg { /* NVRAM Offset */
316
317 u32 config; /* 0x450 */
318#define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001
319
320 /* Use the values from options 47 and 48 instead of the HW default
321 values */
322#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000
323#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002
324
325#define SHARED_FEATURE_MF_MODE_DISABLED 0x00000100
326
327};
328
329
330/****************************************************************************
331 * Port Feature configuration *
332 ****************************************************************************/
333struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
334
335 u32 config;
336#define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f
337#define PORT_FEATURE_BAR1_SIZE_SHIFT 0
338#define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000
339#define PORT_FEATURE_BAR1_SIZE_64K 0x00000001
340#define PORT_FEATURE_BAR1_SIZE_128K 0x00000002
341#define PORT_FEATURE_BAR1_SIZE_256K 0x00000003
342#define PORT_FEATURE_BAR1_SIZE_512K 0x00000004
343#define PORT_FEATURE_BAR1_SIZE_1M 0x00000005
344#define PORT_FEATURE_BAR1_SIZE_2M 0x00000006
345#define PORT_FEATURE_BAR1_SIZE_4M 0x00000007
346#define PORT_FEATURE_BAR1_SIZE_8M 0x00000008
347#define PORT_FEATURE_BAR1_SIZE_16M 0x00000009
348#define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a
349#define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b
350#define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c
351#define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d
352#define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e
353#define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f
354#define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0
355#define PORT_FEATURE_BAR2_SIZE_SHIFT 4
356#define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000
357#define PORT_FEATURE_BAR2_SIZE_64K 0x00000010
358#define PORT_FEATURE_BAR2_SIZE_128K 0x00000020
359#define PORT_FEATURE_BAR2_SIZE_256K 0x00000030
360#define PORT_FEATURE_BAR2_SIZE_512K 0x00000040
361#define PORT_FEATURE_BAR2_SIZE_1M 0x00000050
362#define PORT_FEATURE_BAR2_SIZE_2M 0x00000060
363#define PORT_FEATURE_BAR2_SIZE_4M 0x00000070
364#define PORT_FEATURE_BAR2_SIZE_8M 0x00000080
365#define PORT_FEATURE_BAR2_SIZE_16M 0x00000090
366#define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0
367#define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0
368#define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0
369#define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0
370#define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0
371#define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0
372#define PORT_FEATURE_EN_SIZE_MASK 0x07000000
373#define PORT_FEATURE_EN_SIZE_SHIFT 24
374#define PORT_FEATURE_WOL_ENABLED 0x01000000
375#define PORT_FEATURE_MBA_ENABLED 0x02000000
376#define PORT_FEATURE_MFW_ENABLED 0x04000000
377
378 /* Reserved bits: 28-29 */
379 /* Check the optic vendor via i2c against a list of approved modules
380 in a separate nvram image */
381#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xE0000000
382#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29
383#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT 0x00000000
384#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER 0x20000000
385#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000
386#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000
387
388
389 u32 wol_config;
390	 /* Default is used when the driver is set to "auto" mode */
391#define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003
392#define PORT_FEATURE_WOL_DEFAULT_SHIFT 0
393#define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000
394#define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001
395#define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002
396#define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003
397#define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004
398#define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008
399#define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010
400
401 u32 mba_config;
402#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000003
403#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0
404#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000
405#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001
406#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002
407#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003
408#define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100
409#define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200
410#define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400
411#define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000
412#define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800
413#define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000
414#define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12
415#define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000
416#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000
417#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000
418#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000
419#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000
420#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000
421#define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000
422#define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000
423#define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000
424#define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000
425#define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000
426#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000
427#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000
428#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000
429#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000
430#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000
431#define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000
432#define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20
433#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000
434#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24
435#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000
436#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000
437#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000
438#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000
439#define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000
440#define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26
441#define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000
442#define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000
443#define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000
444#define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000
445#define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000
446#define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000
447#define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000
448#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000
449#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KX4 0x20000000
450#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KR 0x24000000
451#define PORT_FEATURE_MBA_LINK_SPEED_12GBPS 0x28000000
452#define PORT_FEATURE_MBA_LINK_SPEED_12_5GBPS 0x2c000000
453#define PORT_FEATURE_MBA_LINK_SPEED_13GBPS 0x30000000
454#define PORT_FEATURE_MBA_LINK_SPEED_15GBPS 0x34000000
455#define PORT_FEATURE_MBA_LINK_SPEED_16GBPS 0x38000000
456
457 u32 bmc_config;
458#define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000
459#define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001
460
461 u32 mba_vlan_cfg;
462#define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff
463#define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0
464#define PORT_FEATURE_MBA_VLAN_EN 0x00010000
465
466 u32 resource_cfg;
467#define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001
468#define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002
469#define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004
470#define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008
471#define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010
472
473 u32 smbus_config;
474 /* Obsolete */
475#define PORT_FEATURE_SMBUS_EN 0x00000001
476#define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe
477#define PORT_FEATURE_SMBUS_ADDR_SHIFT 1
478
479 u32 reserved1;
480
481 u32 link_config; /* Used as HW defaults for the driver */
482#define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000
483#define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24
484 /* (forced) low speed switch (< 10G) */
485#define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000
486 /* (forced) high speed switch (>= 10G) */
487#define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000
488#define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000
489#define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000
490
491#define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000
492#define PORT_FEATURE_LINK_SPEED_SHIFT 16
493#define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000
494#define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000
495#define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000
496#define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000
497#define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000
498#define PORT_FEATURE_LINK_SPEED_1G 0x00050000
499#define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000
500#define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000
501#define PORT_FEATURE_LINK_SPEED_10G_KX4 0x00080000
502#define PORT_FEATURE_LINK_SPEED_10G_KR 0x00090000
503#define PORT_FEATURE_LINK_SPEED_12G 0x000a0000
504#define PORT_FEATURE_LINK_SPEED_12_5G 0x000b0000
505#define PORT_FEATURE_LINK_SPEED_13G 0x000c0000
506#define PORT_FEATURE_LINK_SPEED_15G 0x000d0000
507#define PORT_FEATURE_LINK_SPEED_16G 0x000e0000
508
509#define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700
510#define PORT_FEATURE_FLOW_CONTROL_SHIFT 8
511#define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000
512#define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100
513#define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200
514#define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300
515#define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
516
517	 /* The default for the MCP link configuration;
518	 uses the same defines as link_config */
519 u32 mfw_wol_link_cfg;
520
521 u32 reserved[19];
522
523};
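/* Illustrative sketch, not part of the original commit: like the PHY type
 * above, the link speed values (PORT_FEATURE_LINK_SPEED_*) are encoded
 * in-place within PORT_FEATURE_LINK_SPEED_MASK, so a hypothetical reader
 * of link_config only needs the mask to classify the configured speed:
 */
static inline u32 example_cfg_link_speed(u32 link_config)
{
	/* e.g. returns PORT_FEATURE_LINK_SPEED_10G_CX4 */
	return link_config & PORT_FEATURE_LINK_SPEED_MASK;
}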
524
525
526/****************************************************************************
527 * Device Information *
528 ****************************************************************************/
529struct shm_dev_info { /* size */
530
531 u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */
532
533 struct shared_hw_cfg shared_hw_config; /* 40 */
534
535 struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */
536
537 struct shared_feat_cfg shared_feature_config; /* 4 */
538
539 struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */
540
541};
542
543
544#define FUNC_0 0
545#define FUNC_1 1
546#define FUNC_2 2
547#define FUNC_3 3
548#define FUNC_4 4
549#define FUNC_5 5
550#define FUNC_6 6
551#define FUNC_7 7
552#define E1_FUNC_MAX 2
553#define E1H_FUNC_MAX 8
554
555#define VN_0 0
556#define VN_1 1
557#define VN_2 2
558#define VN_3 3
559#define E1VN_MAX 1
560#define E1HVN_MAX 4
561
562
563/* This value (in milliseconds) determines the frequency of the driver
564 * issuing the PULSE message code. The firmware monitors this periodic
565 * pulse to determine when to switch to an OS-absent mode. */
566#define DRV_PULSE_PERIOD_MS 250
567
568/* This value (in milliseconds) determines how long the driver should
569 * wait for an acknowledgement from the firmware before timing out. Once
570 * this timeout expires, the driver assumes there is no firmware
571 * running and there won't be any firmware-driver synchronization during a
572 * driver reset. */
573#define FW_ACK_TIME_OUT_MS 5000
574
575#define FW_ACK_POLL_TIME_MS 1
576
577#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
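/* Worked example, not part of the original commit: with the values above,
 * FW_ACK_NUM_OF_POLL evaluates to 5000/1 = 5000 iterations of a 1 ms poll,
 * i.e. the full FW_ACK_TIME_OUT_MS budget of 5 seconds.  A hypothetical
 * polling loop shaped around these constants (fw_has_acked() is made up
 * for illustration):
 *
 *	for (cnt = 0; cnt < FW_ACK_NUM_OF_POLL; cnt++) {
 *		if (fw_has_acked())
 *			break;
 *		msleep(FW_ACK_POLL_TIME_MS);
 *	}
 */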
578
579/* LED Blink rate that will achieve ~15.9Hz */
580#define LED_BLINK_RATE_VAL 480
581
582/****************************************************************************
583 * Driver <-> FW Mailbox *
584 ****************************************************************************/
585struct drv_port_mb {
586
587 u32 link_status;
588 /* Driver should update this field on any link change event */
589
590#define LINK_STATUS_LINK_FLAG_MASK 0x00000001
591#define LINK_STATUS_LINK_UP 0x00000001
592#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
593#define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1)
594#define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1)
595#define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1)
596#define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1)
597#define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1)
598#define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1)
599#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1)
600#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1)
601#define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1)
602#define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1)
603#define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1)
604#define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1)
605#define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1)
606#define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1)
607#define LINK_STATUS_SPEED_AND_DUPLEX_12GTFD (11<<1)
608#define LINK_STATUS_SPEED_AND_DUPLEX_12GXFD (11<<1)
609#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD (12<<1)
610#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD (12<<1)
611#define LINK_STATUS_SPEED_AND_DUPLEX_13GTFD (13<<1)
612#define LINK_STATUS_SPEED_AND_DUPLEX_13GXFD (13<<1)
613#define LINK_STATUS_SPEED_AND_DUPLEX_15GTFD (14<<1)
614#define LINK_STATUS_SPEED_AND_DUPLEX_15GXFD (14<<1)
615#define LINK_STATUS_SPEED_AND_DUPLEX_16GTFD (15<<1)
616#define LINK_STATUS_SPEED_AND_DUPLEX_16GXFD (15<<1)
617
618#define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020
619#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
620
621#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
622#define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080
623#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
624
625#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
626#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
627#define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800
628#define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000
629#define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000
630#define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000
631#define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000
632
633#define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000
634#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000
635
636#define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000
637#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000
638
639#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
640#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18)
641#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18)
642#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18)
643#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18)
644
645#define LINK_STATUS_SERDES_LINK 0x00100000
646
647#define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000
648#define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000
649#define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000
650#define LINK_STATUS_LINK_PARTNER_12GXFD_CAPABLE 0x01000000
651#define LINK_STATUS_LINK_PARTNER_12_5GXFD_CAPABLE 0x02000000
652#define LINK_STATUS_LINK_PARTNER_13GXFD_CAPABLE 0x04000000
653#define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 0x08000000
654#define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 0x10000000
655
656 u32 port_stx;
657
658 u32 stat_nig_timer;
659
660 /* MCP firmware does not use this field */
661 u32 ext_phy_fw_version;
662
663};
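/* Illustrative sketch, not part of the original commit: link up is a
 * single-bit flag, while speed/duplex is a multi-bit field whose values
 * are pre-shifted (e.g. (10<<1) for 10GTFD), so both are tested by masking
 * and comparing.  Hypothetical helpers over link_status:
 */
static inline int example_link_is_up(u32 link_status)
{
	return (link_status & LINK_STATUS_LINK_FLAG_MASK) == LINK_STATUS_LINK_UP;
}

static inline int example_link_is_10g_full(u32 link_status)
{
	return (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) ==
	       LINK_STATUS_SPEED_AND_DUPLEX_10GTFD;
}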
664
665
666struct drv_func_mb {
667
668 u32 drv_mb_header;
669#define DRV_MSG_CODE_MASK 0xffff0000
670#define DRV_MSG_CODE_LOAD_REQ 0x10000000
671#define DRV_MSG_CODE_LOAD_DONE 0x11000000
672#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
673#define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
674#define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
675#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
676#define DRV_MSG_CODE_DCC_OK 0x30000000
677#define DRV_MSG_CODE_DCC_FAILURE 0x31000000
678#define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
679#define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
680#define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
681#define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
682#define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
683#define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
684#define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
685 /*
686 * The optic module verification commands require bootcode
687 * v5.0.6 or later
688 */
689#define DRV_MSG_CODE_VRFY_OPT_MDL 0xa0000000
690#define REQ_BC_VER_4_VRFY_OPT_MDL 0x00050006
691
692#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
693#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
694#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
695#define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
696
697#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
698
699 u32 drv_mb_param;
700
701 u32 fw_mb_header;
702#define FW_MSG_CODE_MASK 0xffff0000
703#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
704#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
705#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
706#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
707#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
708#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
709#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000
710#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000
711#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
712#define FW_MSG_CODE_DCC_DONE 0x30100000
713#define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000
714#define FW_MSG_CODE_DIAG_REFUSE 0x50200000
715#define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000
716#define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000
717#define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000
718#define FW_MSG_CODE_GET_KEY_DONE 0x80100000
719#define FW_MSG_CODE_NO_KEY 0x80f00000
720#define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000
721#define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000
722#define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000
723#define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000
724#define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000
725#define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000
726#define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000
727#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
728#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
729
730#define FW_MSG_CODE_LIC_CHALLENGE 0xff010000
731#define FW_MSG_CODE_LIC_RESPONSE 0xff020000
732#define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000
733#define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
734
735#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
736
737 u32 fw_mb_param;
738
739 u32 drv_pulse_mb;
740#define DRV_PULSE_SEQ_MASK 0x00007fff
741#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
742 /* The system time is in the format of
743 * (year-2001)*12*32 + month*32 + day. */
744#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
745	 /* Indicates to the firmware not to go into OS-absent
746	 * mode when it is not getting the driver pulse.
747	 * This is used for debugging as well as for PXE (MBA). */
748
749 u32 mcp_pulse_mb;
750#define MCP_PULSE_SEQ_MASK 0x00007fff
751#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
752 /* Indicates to the driver not to assert due to lack
753 * of MCP response */
754#define MCP_EVENT_MASK 0xffff0000
755#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
756
757 u32 iscsi_boot_signature;
758 u32 iscsi_boot_block_offset;
759
760 u32 drv_status;
761#define DRV_STATUS_PMF 0x00000001
762
763#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
764#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
765#define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200
766#define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400
767#define DRV_STATUS_DCC_RESERVED1 0x00000800
768#define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
769#define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
770
771 u32 virt_mac_upper;
772#define VIRT_MAC_SIGN_MASK 0xffff0000
773#define VIRT_MAC_SIGNATURE 0x564d0000
774 u32 virt_mac_lower;
775
776};
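/* Worked example, not part of the original commit: the system-time half of
 * drv_pulse_mb packs a date as (year-2001)*12*32 + month*32 + day; e.g.
 * 27 July 2010 encodes as (2010-2001)*12*32 + 7*32 + 27 = 3707.  A
 * hypothetical encoder, assuming the date lives in the upper 16 bits as
 * implied by DRV_PULSE_SYSTEM_TIME_MASK:
 */
static inline u32 example_drv_pulse(u16 seq, u16 year, u8 month, u8 day)
{
	u32 sys_time = (year - 2001) * 12 * 32 + month * 32 + day;

	return (seq & DRV_PULSE_SEQ_MASK) |
	       ((sys_time << 16) & DRV_PULSE_SYSTEM_TIME_MASK);
}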
777
778
779/****************************************************************************
780 * Management firmware state *
781 ****************************************************************************/
782/* Allocate 440 bytes for management firmware */
783#define MGMTFW_STATE_WORD_SIZE 110
784
785struct mgmtfw_state {
786 u32 opaque[MGMTFW_STATE_WORD_SIZE];
787};
788
789
790/****************************************************************************
791 * Multi-Function configuration *
792 ****************************************************************************/
793struct shared_mf_cfg {
794
795 u32 clp_mb;
796#define SHARED_MF_CLP_SET_DEFAULT 0x00000000
797 /* set by CLP */
798#define SHARED_MF_CLP_EXIT 0x00000001
799 /* set by MCP */
800#define SHARED_MF_CLP_EXIT_DONE 0x00010000
801
802};
803
804struct port_mf_cfg {
805
806 u32 dynamic_cfg; /* device control channel */
807#define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
808#define PORT_MF_CFG_E1HOV_TAG_SHIFT 0
809#define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK
810
811 u32 reserved[3];
812
813};
814
815struct func_mf_cfg {
816
817 u32 config;
818 /* E/R/I/D */
819 /* function 0 of each port cannot be hidden */
820#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
821
822#define FUNC_MF_CFG_PROTOCOL_MASK 0x00000007
823#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002
824#define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004
825#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006
826#define FUNC_MF_CFG_PROTOCOL_DEFAULT\
827 FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
828
829#define FUNC_MF_CFG_FUNC_DISABLED 0x00000008
830
831 /* PRI */
832 /* 0 - low priority, 3 - high priority */
833#define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300
834#define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8
835#define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000
836
837 /* MINBW, MAXBW */
838	 /* value range 0..100, in increments of 100 Mbps */
839#define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000
840#define FUNC_MF_CFG_MIN_BW_SHIFT 16
841#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
842#define FUNC_MF_CFG_MAX_BW_MASK 0xff000000
843#define FUNC_MF_CFG_MAX_BW_SHIFT 24
844#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000
845
846 u32 mac_upper; /* MAC */
847#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
848#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
849#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
850 u32 mac_lower;
851#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
852
853 u32 e1hov_tag; /* VNI */
854#define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
855#define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
856#define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
857
858 u32 reserved[2];
859
860};
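/* Illustrative sketch, not part of the original commit: per the comment
 * above, MINBW/MAXBW hold 0..100 where each unit is worth 100 Mbps (the
 * default MAXBW of 0x64 is 100, i.e. 10 Gbps).  A hypothetical conversion
 * of the MAXBW field to Mbps:
 */
static inline u32 example_func_max_bw_mbps(u32 cfg)
{
	return ((cfg & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
}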
861
862struct mf_cfg {
863
864 struct shared_mf_cfg shared_mf_config;
865 struct port_mf_cfg port_mf_config[PORT_MAX];
866 struct func_mf_cfg func_mf_config[E1H_FUNC_MAX];
867
868};
869
870
871/****************************************************************************
872 * Shared Memory Region *
873 ****************************************************************************/
874struct shmem_region { /* SharedMem Offset (size) */
875
876 u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */
877#define SHR_MEM_FORMAT_REV_ID ('A'<<24)
878#define SHR_MEM_FORMAT_REV_MASK 0xff000000
879 /* validity bits */
880#define SHR_MEM_VALIDITY_PCI_CFG 0x00100000
881#define SHR_MEM_VALIDITY_MB 0x00200000
882#define SHR_MEM_VALIDITY_DEV_INFO 0x00400000
883#define SHR_MEM_VALIDITY_RESERVED 0x00000007
884 /* One licensing bit should be set */
885#define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
886#define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
887#define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
888#define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
889 /* Active MFW */
890#define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
891#define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040
892#define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080
893#define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0
894#define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
895#define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
896
897 struct shm_dev_info dev_info; /* 0x8 (0x438) */
898
899 struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
900
901 /* FW information (for internal FW use) */
902 u32 fw_info_fio_offset; /* 0x4a8 (0x4) */
903 struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
904
905 struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
906 struct drv_func_mb func_mb[E1H_FUNC_MAX];
907
908 struct mf_cfg mf_cfg;
909
910}; /* 0x6dc */
911
912
913struct shmem2_region {
914
915 u32 size;
916
917 u32 dcc_support;
918#define SHMEM_DCC_SUPPORT_NONE 0x00000000
919#define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001
920#define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004
921#define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008
922#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
923#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
924#define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE
925
926};
927
928
929struct emac_stats {
930 u32 rx_stat_ifhcinoctets;
931 u32 rx_stat_ifhcinbadoctets;
932 u32 rx_stat_etherstatsfragments;
933 u32 rx_stat_ifhcinucastpkts;
934 u32 rx_stat_ifhcinmulticastpkts;
935 u32 rx_stat_ifhcinbroadcastpkts;
936 u32 rx_stat_dot3statsfcserrors;
937 u32 rx_stat_dot3statsalignmenterrors;
938 u32 rx_stat_dot3statscarriersenseerrors;
939 u32 rx_stat_xonpauseframesreceived;
940 u32 rx_stat_xoffpauseframesreceived;
941 u32 rx_stat_maccontrolframesreceived;
942 u32 rx_stat_xoffstateentered;
943 u32 rx_stat_dot3statsframestoolong;
944 u32 rx_stat_etherstatsjabbers;
945 u32 rx_stat_etherstatsundersizepkts;
946 u32 rx_stat_etherstatspkts64octets;
947 u32 rx_stat_etherstatspkts65octetsto127octets;
948 u32 rx_stat_etherstatspkts128octetsto255octets;
949 u32 rx_stat_etherstatspkts256octetsto511octets;
950 u32 rx_stat_etherstatspkts512octetsto1023octets;
951 u32 rx_stat_etherstatspkts1024octetsto1522octets;
952 u32 rx_stat_etherstatspktsover1522octets;
953
954 u32 rx_stat_falsecarriererrors;
955
956 u32 tx_stat_ifhcoutoctets;
957 u32 tx_stat_ifhcoutbadoctets;
958 u32 tx_stat_etherstatscollisions;
959 u32 tx_stat_outxonsent;
960 u32 tx_stat_outxoffsent;
961 u32 tx_stat_flowcontroldone;
962 u32 tx_stat_dot3statssinglecollisionframes;
963 u32 tx_stat_dot3statsmultiplecollisionframes;
964 u32 tx_stat_dot3statsdeferredtransmissions;
965 u32 tx_stat_dot3statsexcessivecollisions;
966 u32 tx_stat_dot3statslatecollisions;
967 u32 tx_stat_ifhcoutucastpkts;
968 u32 tx_stat_ifhcoutmulticastpkts;
969 u32 tx_stat_ifhcoutbroadcastpkts;
970 u32 tx_stat_etherstatspkts64octets;
971 u32 tx_stat_etherstatspkts65octetsto127octets;
972 u32 tx_stat_etherstatspkts128octetsto255octets;
973 u32 tx_stat_etherstatspkts256octetsto511octets;
974 u32 tx_stat_etherstatspkts512octetsto1023octets;
975 u32 tx_stat_etherstatspkts1024octetsto1522octets;
976 u32 tx_stat_etherstatspktsover1522octets;
977 u32 tx_stat_dot3statsinternalmactransmiterrors;
978};
979
980
981struct bmac_stats {
982 u32 tx_stat_gtpkt_lo;
983 u32 tx_stat_gtpkt_hi;
984 u32 tx_stat_gtxpf_lo;
985 u32 tx_stat_gtxpf_hi;
986 u32 tx_stat_gtfcs_lo;
987 u32 tx_stat_gtfcs_hi;
988 u32 tx_stat_gtmca_lo;
989 u32 tx_stat_gtmca_hi;
990 u32 tx_stat_gtbca_lo;
991 u32 tx_stat_gtbca_hi;
992 u32 tx_stat_gtfrg_lo;
993 u32 tx_stat_gtfrg_hi;
994 u32 tx_stat_gtovr_lo;
995 u32 tx_stat_gtovr_hi;
996 u32 tx_stat_gt64_lo;
997 u32 tx_stat_gt64_hi;
998 u32 tx_stat_gt127_lo;
999 u32 tx_stat_gt127_hi;
1000 u32 tx_stat_gt255_lo;
1001 u32 tx_stat_gt255_hi;
1002 u32 tx_stat_gt511_lo;
1003 u32 tx_stat_gt511_hi;
1004 u32 tx_stat_gt1023_lo;
1005 u32 tx_stat_gt1023_hi;
1006 u32 tx_stat_gt1518_lo;
1007 u32 tx_stat_gt1518_hi;
1008 u32 tx_stat_gt2047_lo;
1009 u32 tx_stat_gt2047_hi;
1010 u32 tx_stat_gt4095_lo;
1011 u32 tx_stat_gt4095_hi;
1012 u32 tx_stat_gt9216_lo;
1013 u32 tx_stat_gt9216_hi;
1014 u32 tx_stat_gt16383_lo;
1015 u32 tx_stat_gt16383_hi;
1016 u32 tx_stat_gtmax_lo;
1017 u32 tx_stat_gtmax_hi;
1018 u32 tx_stat_gtufl_lo;
1019 u32 tx_stat_gtufl_hi;
1020 u32 tx_stat_gterr_lo;
1021 u32 tx_stat_gterr_hi;
1022 u32 tx_stat_gtbyt_lo;
1023 u32 tx_stat_gtbyt_hi;
1024
1025 u32 rx_stat_gr64_lo;
1026 u32 rx_stat_gr64_hi;
1027 u32 rx_stat_gr127_lo;
1028 u32 rx_stat_gr127_hi;
1029 u32 rx_stat_gr255_lo;
1030 u32 rx_stat_gr255_hi;
1031 u32 rx_stat_gr511_lo;
1032 u32 rx_stat_gr511_hi;
1033 u32 rx_stat_gr1023_lo;
1034 u32 rx_stat_gr1023_hi;
1035 u32 rx_stat_gr1518_lo;
1036 u32 rx_stat_gr1518_hi;
1037 u32 rx_stat_gr2047_lo;
1038 u32 rx_stat_gr2047_hi;
1039 u32 rx_stat_gr4095_lo;
1040 u32 rx_stat_gr4095_hi;
1041 u32 rx_stat_gr9216_lo;
1042 u32 rx_stat_gr9216_hi;
1043 u32 rx_stat_gr16383_lo;
1044 u32 rx_stat_gr16383_hi;
1045 u32 rx_stat_grmax_lo;
1046 u32 rx_stat_grmax_hi;
1047 u32 rx_stat_grpkt_lo;
1048 u32 rx_stat_grpkt_hi;
1049 u32 rx_stat_grfcs_lo;
1050 u32 rx_stat_grfcs_hi;
1051 u32 rx_stat_grmca_lo;
1052 u32 rx_stat_grmca_hi;
1053 u32 rx_stat_grbca_lo;
1054 u32 rx_stat_grbca_hi;
1055 u32 rx_stat_grxcf_lo;
1056 u32 rx_stat_grxcf_hi;
1057 u32 rx_stat_grxpf_lo;
1058 u32 rx_stat_grxpf_hi;
1059 u32 rx_stat_grxuo_lo;
1060 u32 rx_stat_grxuo_hi;
1061 u32 rx_stat_grjbr_lo;
1062 u32 rx_stat_grjbr_hi;
1063 u32 rx_stat_grovr_lo;
1064 u32 rx_stat_grovr_hi;
1065 u32 rx_stat_grflr_lo;
1066 u32 rx_stat_grflr_hi;
1067 u32 rx_stat_grmeg_lo;
1068 u32 rx_stat_grmeg_hi;
1069 u32 rx_stat_grmeb_lo;
1070 u32 rx_stat_grmeb_hi;
1071 u32 rx_stat_grbyt_lo;
1072 u32 rx_stat_grbyt_hi;
1073 u32 rx_stat_grund_lo;
1074 u32 rx_stat_grund_hi;
1075 u32 rx_stat_grfrg_lo;
1076 u32 rx_stat_grfrg_hi;
1077 u32 rx_stat_grerb_lo;
1078 u32 rx_stat_grerb_hi;
1079 u32 rx_stat_grfre_lo;
1080 u32 rx_stat_grfre_hi;
1081 u32 rx_stat_gripj_lo;
1082 u32 rx_stat_gripj_hi;
1083};
1084
1085
1086union mac_stats {
1087 struct emac_stats emac_stats;
1088 struct bmac_stats bmac_stats;
1089};
1090
1091
1092struct mac_stx {
1093 /* in_bad_octets */
1094 u32 rx_stat_ifhcinbadoctets_hi;
1095 u32 rx_stat_ifhcinbadoctets_lo;
1096
1097 /* out_bad_octets */
1098 u32 tx_stat_ifhcoutbadoctets_hi;
1099 u32 tx_stat_ifhcoutbadoctets_lo;
1100
1101 /* crc_receive_errors */
1102 u32 rx_stat_dot3statsfcserrors_hi;
1103 u32 rx_stat_dot3statsfcserrors_lo;
1104 /* alignment_errors */
1105 u32 rx_stat_dot3statsalignmenterrors_hi;
1106 u32 rx_stat_dot3statsalignmenterrors_lo;
1107 /* carrier_sense_errors */
1108 u32 rx_stat_dot3statscarriersenseerrors_hi;
1109 u32 rx_stat_dot3statscarriersenseerrors_lo;
1110 /* false_carrier_detections */
1111 u32 rx_stat_falsecarriererrors_hi;
1112 u32 rx_stat_falsecarriererrors_lo;
1113
1114 /* runt_packets_received */
1115 u32 rx_stat_etherstatsundersizepkts_hi;
1116 u32 rx_stat_etherstatsundersizepkts_lo;
1117 /* jabber_packets_received */
1118 u32 rx_stat_dot3statsframestoolong_hi;
1119 u32 rx_stat_dot3statsframestoolong_lo;
1120
1121 /* error_runt_packets_received */
1122 u32 rx_stat_etherstatsfragments_hi;
1123 u32 rx_stat_etherstatsfragments_lo;
1124 /* error_jabber_packets_received */
1125 u32 rx_stat_etherstatsjabbers_hi;
1126 u32 rx_stat_etherstatsjabbers_lo;
1127
1128 /* control_frames_received */
1129 u32 rx_stat_maccontrolframesreceived_hi;
1130 u32 rx_stat_maccontrolframesreceived_lo;
1131 u32 rx_stat_bmac_xpf_hi;
1132 u32 rx_stat_bmac_xpf_lo;
1133 u32 rx_stat_bmac_xcf_hi;
1134 u32 rx_stat_bmac_xcf_lo;
1135
1136 /* xoff_state_entered */
1137 u32 rx_stat_xoffstateentered_hi;
1138 u32 rx_stat_xoffstateentered_lo;
1139 /* pause_xon_frames_received */
1140 u32 rx_stat_xonpauseframesreceived_hi;
1141 u32 rx_stat_xonpauseframesreceived_lo;
1142 /* pause_xoff_frames_received */
1143 u32 rx_stat_xoffpauseframesreceived_hi;
1144 u32 rx_stat_xoffpauseframesreceived_lo;
1145 /* pause_xon_frames_transmitted */
1146 u32 tx_stat_outxonsent_hi;
1147 u32 tx_stat_outxonsent_lo;
1148 /* pause_xoff_frames_transmitted */
1149 u32 tx_stat_outxoffsent_hi;
1150 u32 tx_stat_outxoffsent_lo;
1151 /* flow_control_done */
1152 u32 tx_stat_flowcontroldone_hi;
1153 u32 tx_stat_flowcontroldone_lo;
1154
1155 /* ether_stats_collisions */
1156 u32 tx_stat_etherstatscollisions_hi;
1157 u32 tx_stat_etherstatscollisions_lo;
1158 /* single_collision_transmit_frames */
1159 u32 tx_stat_dot3statssinglecollisionframes_hi;
1160 u32 tx_stat_dot3statssinglecollisionframes_lo;
1161 /* multiple_collision_transmit_frames */
1162 u32 tx_stat_dot3statsmultiplecollisionframes_hi;
1163 u32 tx_stat_dot3statsmultiplecollisionframes_lo;
1164 /* deferred_transmissions */
1165 u32 tx_stat_dot3statsdeferredtransmissions_hi;
1166 u32 tx_stat_dot3statsdeferredtransmissions_lo;
1167 /* excessive_collision_frames */
1168 u32 tx_stat_dot3statsexcessivecollisions_hi;
1169 u32 tx_stat_dot3statsexcessivecollisions_lo;
1170 /* late_collision_frames */
1171 u32 tx_stat_dot3statslatecollisions_hi;
1172 u32 tx_stat_dot3statslatecollisions_lo;
1173
1174 /* frames_transmitted_64_bytes */
1175 u32 tx_stat_etherstatspkts64octets_hi;
1176 u32 tx_stat_etherstatspkts64octets_lo;
1177 /* frames_transmitted_65_127_bytes */
1178 u32 tx_stat_etherstatspkts65octetsto127octets_hi;
1179 u32 tx_stat_etherstatspkts65octetsto127octets_lo;
1180 /* frames_transmitted_128_255_bytes */
1181 u32 tx_stat_etherstatspkts128octetsto255octets_hi;
1182 u32 tx_stat_etherstatspkts128octetsto255octets_lo;
1183 /* frames_transmitted_256_511_bytes */
1184 u32 tx_stat_etherstatspkts256octetsto511octets_hi;
1185 u32 tx_stat_etherstatspkts256octetsto511octets_lo;
1186 /* frames_transmitted_512_1023_bytes */
1187 u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
1188 u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
1189 /* frames_transmitted_1024_1522_bytes */
1190 u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
1191 u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
1192 /* frames_transmitted_1523_9022_bytes */
1193 u32 tx_stat_etherstatspktsover1522octets_hi;
1194 u32 tx_stat_etherstatspktsover1522octets_lo;
1195 u32 tx_stat_bmac_2047_hi;
1196 u32 tx_stat_bmac_2047_lo;
1197 u32 tx_stat_bmac_4095_hi;
1198 u32 tx_stat_bmac_4095_lo;
1199 u32 tx_stat_bmac_9216_hi;
1200 u32 tx_stat_bmac_9216_lo;
1201 u32 tx_stat_bmac_16383_hi;
1202 u32 tx_stat_bmac_16383_lo;
1203
1204 /* internal_mac_transmit_errors */
1205 u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
1206 u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
1207
1208 /* if_out_discards */
1209 u32 tx_stat_bmac_ufl_hi;
1210 u32 tx_stat_bmac_ufl_lo;
1211};
1212
1213
1214#define MAC_STX_IDX_MAX 2
1215
1216struct host_port_stats {
1217 u32 host_port_stats_start;
1218
1219 struct mac_stx mac_stx[MAC_STX_IDX_MAX];
1220
1221 u32 brb_drop_hi;
1222 u32 brb_drop_lo;
1223
1224 u32 host_port_stats_end;
1225};
1226
1227
1228struct host_func_stats {
1229 u32 host_func_stats_start;
1230
1231 u32 total_bytes_received_hi;
1232 u32 total_bytes_received_lo;
1233
1234 u32 total_bytes_transmitted_hi;
1235 u32 total_bytes_transmitted_lo;
1236
1237 u32 total_unicast_packets_received_hi;
1238 u32 total_unicast_packets_received_lo;
1239
1240 u32 total_multicast_packets_received_hi;
1241 u32 total_multicast_packets_received_lo;
1242
1243 u32 total_broadcast_packets_received_hi;
1244 u32 total_broadcast_packets_received_lo;
1245
1246 u32 total_unicast_packets_transmitted_hi;
1247 u32 total_unicast_packets_transmitted_lo;
1248
1249 u32 total_multicast_packets_transmitted_hi;
1250 u32 total_multicast_packets_transmitted_lo;
1251
1252 u32 total_broadcast_packets_transmitted_hi;
1253 u32 total_broadcast_packets_transmitted_lo;
1254
1255 u32 valid_bytes_received_hi;
1256 u32 valid_bytes_received_lo;
1257
1258 u32 host_func_stats_end;
1259};
1260
1261
1262#define BCM_5710_FW_MAJOR_VERSION 5
1263#define BCM_5710_FW_MINOR_VERSION 2
1264#define BCM_5710_FW_REVISION_VERSION 13
1265#define BCM_5710_FW_ENGINEERING_VERSION 0
1266#define BCM_5710_FW_COMPILE_FLAGS 1
1267
1268
1269/*
1270 * attention bits
1271 */
1272struct atten_def_status_block {
1273 __le32 attn_bits;
1274 __le32 attn_bits_ack;
1275 u8 status_block_id;
1276 u8 reserved0;
1277 __le16 attn_bits_index;
1278 __le32 reserved1;
1279};
1280
1281
1282/*
1283 * common data for all protocols
1284 */
1285struct doorbell_hdr {
1286 u8 header;
1287#define DOORBELL_HDR_RX (0x1<<0)
1288#define DOORBELL_HDR_RX_SHIFT 0
1289#define DOORBELL_HDR_DB_TYPE (0x1<<1)
1290#define DOORBELL_HDR_DB_TYPE_SHIFT 1
1291#define DOORBELL_HDR_DPM_SIZE (0x3<<2)
1292#define DOORBELL_HDR_DPM_SIZE_SHIFT 2
1293#define DOORBELL_HDR_CONN_TYPE (0xF<<4)
1294#define DOORBELL_HDR_CONN_TYPE_SHIFT 4
1295};
1296
1297/*
1298 * doorbell message sent to the chip
1299 */
1300struct doorbell {
1301#if defined(__BIG_ENDIAN)
1302 u16 zero_fill2;
1303 u8 zero_fill1;
1304 struct doorbell_hdr header;
1305#elif defined(__LITTLE_ENDIAN)
1306 struct doorbell_hdr header;
1307 u8 zero_fill1;
1308 u16 zero_fill2;
1309#endif
1310};
1311
1312
1313/*
1314 * doorbell message sent to the chip
1315 */
1316struct doorbell_set_prod {
1317#if defined(__BIG_ENDIAN)
1318 u16 prod;
1319 u8 zero_fill1;
1320 struct doorbell_hdr header;
1321#elif defined(__LITTLE_ENDIAN)
1322 struct doorbell_hdr header;
1323 u8 zero_fill1;
1324 u16 prod;
1325#endif
1326};
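/* Illustrative sketch, not part of the original commit: the header byte of
 * a doorbell is built with the (value << SHIFT) & FIELD pattern used by
 * the DOORBELL_HDR_* definitions above.  A hypothetical composer for two
 * of the fields:
 */
static inline u8 example_doorbell_hdr(u8 conn_type, u8 dpm_size)
{
	return ((conn_type << DOORBELL_HDR_CONN_TYPE_SHIFT) &
		DOORBELL_HDR_CONN_TYPE) |
	       ((dpm_size << DOORBELL_HDR_DPM_SIZE_SHIFT) &
		DOORBELL_HDR_DPM_SIZE);
}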
1327
1328
1329/*
1330 * IGU driver acknowledgement register
1331 */
1332struct igu_ack_register {
1333#if defined(__BIG_ENDIAN)
1334 u16 sb_id_and_flags;
1335#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
1336#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
1337#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
1338#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
1339#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
1340#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
1341#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
1342#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
1343#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
1344#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
1345 u16 status_block_index;
1346#elif defined(__LITTLE_ENDIAN)
1347 u16 status_block_index;
1348 u16 sb_id_and_flags;
1349#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
1350#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
1351#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
1352#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
1353#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
1354#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
1355#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
1356#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
1357#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
1358#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
1359#endif
1360};
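/* Illustrative sketch, not part of the original commit: sb_id_and_flags is
 * composed field by field with the masks and shifts above; a hypothetical
 * builder might look like:
 */
static inline u16 example_igu_ack_flags(u8 sb_id, u8 storm_id, u8 update_idx)
{
	return ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) &
		IGU_ACK_REGISTER_STATUS_BLOCK_ID) |
	       ((storm_id << IGU_ACK_REGISTER_STORM_ID_SHIFT) &
		IGU_ACK_REGISTER_STORM_ID) |
	       ((update_idx << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) &
		IGU_ACK_REGISTER_UPDATE_INDEX);
}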
1361
1362
1363/*
1364 * IGU driver acknowledgement register
1365 */
1366struct igu_backward_compatible {
1367 u32 sb_id_and_flags;
1368#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0)
1369#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0
1370#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16)
1371#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16
1372#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21)
1373#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21
1374#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24)
1375#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24
1376#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25)
1377#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25
1378#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27)
1379#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27
1380 u32 reserved_2;
1381};
1382
1383
1384/*
1385 * IGU driver acknowledgement register
1386 */
1387struct igu_regular {
1388 u32 sb_id_and_flags;
1389#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0)
1390#define IGU_REGULAR_SB_INDEX_SHIFT 0
1391#define IGU_REGULAR_RESERVED0 (0x1<<20)
1392#define IGU_REGULAR_RESERVED0_SHIFT 20
1393#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21)
1394#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21
1395#define IGU_REGULAR_BUPDATE (0x1<<24)
1396#define IGU_REGULAR_BUPDATE_SHIFT 24
1397#define IGU_REGULAR_ENABLE_INT (0x3<<25)
1398#define IGU_REGULAR_ENABLE_INT_SHIFT 25
1399#define IGU_REGULAR_RESERVED_1 (0x1<<27)
1400#define IGU_REGULAR_RESERVED_1_SHIFT 27
1401#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28)
1402#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28
1403#define IGU_REGULAR_CLEANUP_SET (0x1<<30)
1404#define IGU_REGULAR_CLEANUP_SET_SHIFT 30
1405#define IGU_REGULAR_BCLEANUP (0x1<<31)
1406#define IGU_REGULAR_BCLEANUP_SHIFT 31
1407 u32 reserved_2;
1408};
1409
1410/*
1411 * IGU driver acknowledgement register
1412 */
1413union igu_consprod_reg {
1414 struct igu_regular regular;
1415 struct igu_backward_compatible backward_compatible;
1416};
1417
1418
1419/*
1420 * Parser parsing flags field
1421 */
1422struct parsing_flags {
1423 __le16 flags;
1424#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0)
1425#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0
1426#define PARSING_FLAGS_VLAN (0x1<<1)
1427#define PARSING_FLAGS_VLAN_SHIFT 1
1428#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2)
1429#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2
1430#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3)
1431#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3
1432#define PARSING_FLAGS_IP_OPTIONS (0x1<<5)
1433#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5
1434#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1<<6)
1435#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6
1436#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3<<7)
1437#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7
1438#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1<<9)
1439#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9
1440#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1<<10)
1441#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10
1442#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1<<11)
1443#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11
1444#define PARSING_FLAGS_CONNECTION_MATCH (0x1<<12)
1445#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12
1446#define PARSING_FLAGS_LLC_SNAP (0x1<<13)
1447#define PARSING_FLAGS_LLC_SNAP_SHIFT 13
1448#define PARSING_FLAGS_RESERVED0 (0x3<<14)
1449#define PARSING_FLAGS_RESERVED0_SHIFT 14
1450};
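/* Illustrative sketch, not part of the original commit: the flags word is
 * little-endian, so a hypothetical test converts it with the usual kernel
 * byte-order helper before masking the bit of interest:
 */
static inline int example_pkt_has_vlan(__le16 flags)
{
	return !!(le16_to_cpu(flags) & PARSING_FLAGS_VLAN);
}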
1451
1452
1453struct regpair {
1454 __le32 lo;
1455 __le32 hi;
1456};
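/* Illustrative sketch, not part of the original commit: a regpair carries a
 * 64-bit quantity split into little-endian halves; a hypothetical helper
 * reassembling it on the host:
 */
static inline u64 example_regpair_to_u64(struct regpair rp)
{
	return ((u64)le32_to_cpu(rp.hi) << 32) | le32_to_cpu(rp.lo);
}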
1457
1458
1459/*
1460 * dmae command structure
1461 */
1462struct dmae_command {
1463 u32 opcode;
1464#define DMAE_COMMAND_SRC (0x1<<0)
1465#define DMAE_COMMAND_SRC_SHIFT 0
1466#define DMAE_COMMAND_DST (0x3<<1)
1467#define DMAE_COMMAND_DST_SHIFT 1
1468#define DMAE_COMMAND_C_DST (0x1<<3)
1469#define DMAE_COMMAND_C_DST_SHIFT 3
1470#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4)
1471#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4
1472#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5)
1473#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5
1474#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6)
1475#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6
1476#define DMAE_COMMAND_ENDIANITY (0x3<<9)
1477#define DMAE_COMMAND_ENDIANITY_SHIFT 9
1478#define DMAE_COMMAND_PORT (0x1<<11)
1479#define DMAE_COMMAND_PORT_SHIFT 11
1480#define DMAE_COMMAND_CRC_RESET (0x1<<12)
1481#define DMAE_COMMAND_CRC_RESET_SHIFT 12
1482#define DMAE_COMMAND_SRC_RESET (0x1<<13)
1483#define DMAE_COMMAND_SRC_RESET_SHIFT 13
1484#define DMAE_COMMAND_DST_RESET (0x1<<14)
1485#define DMAE_COMMAND_DST_RESET_SHIFT 14
1486#define DMAE_COMMAND_E1HVN (0x3<<15)
1487#define DMAE_COMMAND_E1HVN_SHIFT 15
1488#define DMAE_COMMAND_RESERVED0 (0x7FFF<<17)
1489#define DMAE_COMMAND_RESERVED0_SHIFT 17
1490 u32 src_addr_lo;
1491 u32 src_addr_hi;
1492 u32 dst_addr_lo;
1493 u32 dst_addr_hi;
1494#if defined(__BIG_ENDIAN)
1495 u16 reserved1;
1496 u16 len;
1497#elif defined(__LITTLE_ENDIAN)
1498 u16 len;
1499 u16 reserved1;
1500#endif
1501 u32 comp_addr_lo;
1502 u32 comp_addr_hi;
1503 u32 comp_val;
1504 u32 crc32;
1505 u32 crc32_c;
1506#if defined(__BIG_ENDIAN)
1507 u16 crc16_c;
1508 u16 crc16;
1509#elif defined(__LITTLE_ENDIAN)
1510 u16 crc16;
1511 u16 crc16_c;
1512#endif
1513#if defined(__BIG_ENDIAN)
1514 u16 reserved2;
1515 u16 crc_t10;
1516#elif defined(__LITTLE_ENDIAN)
1517 u16 crc_t10;
1518 u16 reserved2;
1519#endif
1520#if defined(__BIG_ENDIAN)
1521 u16 xsum8;
1522 u16 xsum16;
1523#elif defined(__LITTLE_ENDIAN)
1524 u16 xsum16;
1525 u16 xsum8;
1526#endif
1527};
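/* Illustrative sketch, not part of the original commit: the DMAE opcode is
 * assembled from its bit-fields with the masks and shifts above.  A
 * hypothetical composer covering a subset of the fields:
 */
static inline u32 example_dmae_opcode(u8 src, u8 dst, u8 port, u8 vn)
{
	return ((src << DMAE_COMMAND_SRC_SHIFT) & DMAE_COMMAND_SRC) |
	       ((dst << DMAE_COMMAND_DST_SHIFT) & DMAE_COMMAND_DST) |
	       ((port << DMAE_COMMAND_PORT_SHIFT) & DMAE_COMMAND_PORT) |
	       ((vn << DMAE_COMMAND_E1HVN_SHIFT) & DMAE_COMMAND_E1HVN);
}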
1528
1529
1530struct double_regpair {
1531 u32 regpair0_lo;
1532 u32 regpair0_hi;
1533 u32 regpair1_lo;
1534 u32 regpair1_hi;
1535};
1536
1537
1538/*
1539 * The eth storm context of Ustorm (configuration part)
1540 */
1541struct ustorm_eth_st_context_config {
1542#if defined(__BIG_ENDIAN)
1543 u8 flags;
1544#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0)
1545#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0
1546#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1)
1547#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1
1548#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2)
1549#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2
1550#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3)
1551#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3
1552#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4)
1553#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
1554 u8 status_block_id;
1555 u8 clientId;
1556 u8 sb_index_numbers;
1557#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
1558#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
1559#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
1560#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
1561#elif defined(__LITTLE_ENDIAN)
1562 u8 sb_index_numbers;
1563#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
1564#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
1565#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
1566#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
1567 u8 clientId;
1568 u8 status_block_id;
1569 u8 flags;
1570#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0)
1571#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0
1572#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1)
1573#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1
1574#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2)
1575#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2
1576#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3)
1577#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3
1578#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4)
1579#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
1580#endif
1581#if defined(__BIG_ENDIAN)
1582 u16 bd_buff_size;
1583 u8 statistics_counter_id;
1584 u8 mc_alignment_log_size;
1585#elif defined(__LITTLE_ENDIAN)
1586 u8 mc_alignment_log_size;
1587 u8 statistics_counter_id;
1588 u16 bd_buff_size;
1589#endif
1590#if defined(__BIG_ENDIAN)
1591 u8 __local_sge_prod;
1592 u8 __local_bd_prod;
1593 u16 sge_buff_size;
1594#elif defined(__LITTLE_ENDIAN)
1595 u16 sge_buff_size;
1596 u8 __local_bd_prod;
1597 u8 __local_sge_prod;
1598#endif
1599#if defined(__BIG_ENDIAN)
1600 u16 __sdm_bd_expected_counter;
1601 u8 cstorm_agg_int;
1602 u8 __expected_bds_on_ram;
1603#elif defined(__LITTLE_ENDIAN)
1604 u8 __expected_bds_on_ram;
1605 u8 cstorm_agg_int;
1606 u16 __sdm_bd_expected_counter;
1607#endif
1608#if defined(__BIG_ENDIAN)
1609 u16 __ring_data_ram_addr;
1610 u16 __hc_cstorm_ram_addr;
1611#elif defined(__LITTLE_ENDIAN)
1612 u16 __hc_cstorm_ram_addr;
1613 u16 __ring_data_ram_addr;
1614#endif
1615#if defined(__BIG_ENDIAN)
1616 u8 reserved1;
1617 u8 max_sges_for_packet;
1618 u16 __bd_ring_ram_addr;
1619#elif defined(__LITTLE_ENDIAN)
1620 u16 __bd_ring_ram_addr;
1621 u8 max_sges_for_packet;
1622 u8 reserved1;
1623#endif
1624 u32 bd_page_base_lo;
1625 u32 bd_page_base_hi;
1626 u32 sge_page_base_lo;
1627 u32 sge_page_base_hi;
1628 struct regpair reserved2;
1629};
1630
1631/*
1632 * The eth Rx Buffer Descriptor
1633 */
1634struct eth_rx_bd {
1635 __le32 addr_lo;
1636 __le32 addr_hi;
1637};
1638
1639/*
1640 * The eth Rx SGE Descriptor
1641 */
1642struct eth_rx_sge {
1643 __le32 addr_lo;
1644 __le32 addr_hi;
1645};
1646
1647/*
1648 * Local BDs and SGEs rings (in ETH)
1649 */
1650struct eth_local_rx_rings {
1651 struct eth_rx_bd __local_bd_ring[8];
1652 struct eth_rx_sge __local_sge_ring[10];
1653};
1654
1655/*
1656 * The eth storm context of Ustorm
1657 */
1658struct ustorm_eth_st_context {
1659 struct ustorm_eth_st_context_config common;
1660 struct eth_local_rx_rings __rings;
1661};
1662
1663/*
1664 * The eth storm context of Tstorm
1665 */
1666struct tstorm_eth_st_context {
1667 u32 __reserved0[28];
1668};
1669
1670/*
1671 * The eth aggregative context section of Xstorm
1672 */
1673struct xstorm_eth_extra_ag_context_section {
1674#if defined(__BIG_ENDIAN)
1675 u8 __tcp_agg_vars1;
1676 u8 __reserved50;
1677 u16 __mss;
1678#elif defined(__LITTLE_ENDIAN)
1679 u16 __mss;
1680 u8 __reserved50;
1681 u8 __tcp_agg_vars1;
1682#endif
1683 u32 __snd_nxt;
1684 u32 __tx_wnd;
1685 u32 __snd_una;
1686 u32 __reserved53;
1687#if defined(__BIG_ENDIAN)
1688 u8 __agg_val8_th;
1689 u8 __agg_val8;
1690 u16 __tcp_agg_vars2;
1691#elif defined(__LITTLE_ENDIAN)
1692 u16 __tcp_agg_vars2;
1693 u8 __agg_val8;
1694 u8 __agg_val8_th;
1695#endif
1696 u32 __reserved58;
1697 u32 __reserved59;
1698 u32 __reserved60;
1699 u32 __reserved61;
1700#if defined(__BIG_ENDIAN)
1701 u16 __agg_val7_th;
1702 u16 __agg_val7;
1703#elif defined(__LITTLE_ENDIAN)
1704 u16 __agg_val7;
1705 u16 __agg_val7_th;
1706#endif
1707#if defined(__BIG_ENDIAN)
1708 u8 __tcp_agg_vars5;
1709 u8 __tcp_agg_vars4;
1710 u8 __tcp_agg_vars3;
1711 u8 __reserved62;
1712#elif defined(__LITTLE_ENDIAN)
1713 u8 __reserved62;
1714 u8 __tcp_agg_vars3;
1715 u8 __tcp_agg_vars4;
1716 u8 __tcp_agg_vars5;
1717#endif
1718 u32 __tcp_agg_vars6;
1719#if defined(__BIG_ENDIAN)
1720 u16 __agg_misc6;
1721 u16 __tcp_agg_vars7;
1722#elif defined(__LITTLE_ENDIAN)
1723 u16 __tcp_agg_vars7;
1724 u16 __agg_misc6;
1725#endif
1726 u32 __agg_val10;
1727 u32 __agg_val10_th;
1728#if defined(__BIG_ENDIAN)
1729 u16 __reserved3;
1730 u8 __reserved2;
1731 u8 __da_only_cnt;
1732#elif defined(__LITTLE_ENDIAN)
1733 u8 __da_only_cnt;
1734 u8 __reserved2;
1735 u16 __reserved3;
1736#endif
1737};
1738
1739/*
1740 * The eth aggregative context of Xstorm
1741 */
1742struct xstorm_eth_ag_context {
1743#if defined(__BIG_ENDIAN)
1744 u16 agg_val1;
1745 u8 __agg_vars1;
1746 u8 __state;
1747#elif defined(__LITTLE_ENDIAN)
1748 u8 __state;
1749 u8 __agg_vars1;
1750 u16 agg_val1;
1751#endif
1752#if defined(__BIG_ENDIAN)
1753 u8 cdu_reserved;
1754 u8 __agg_vars4;
1755 u8 __agg_vars3;
1756 u8 __agg_vars2;
1757#elif defined(__LITTLE_ENDIAN)
1758 u8 __agg_vars2;
1759 u8 __agg_vars3;
1760 u8 __agg_vars4;
1761 u8 cdu_reserved;
1762#endif
1763 u32 __bd_prod;
1764#if defined(__BIG_ENDIAN)
1765 u16 __agg_vars5;
1766 u16 __agg_val4_th;
1767#elif defined(__LITTLE_ENDIAN)
1768 u16 __agg_val4_th;
1769 u16 __agg_vars5;
1770#endif
1771 struct xstorm_eth_extra_ag_context_section __extra_section;
1772#if defined(__BIG_ENDIAN)
1773 u16 __agg_vars7;
1774 u8 __agg_val3_th;
1775 u8 __agg_vars6;
1776#elif defined(__LITTLE_ENDIAN)
1777 u8 __agg_vars6;
1778 u8 __agg_val3_th;
1779 u16 __agg_vars7;
1780#endif
1781#if defined(__BIG_ENDIAN)
1782 u16 __agg_val11_th;
1783 u16 __agg_val11;
1784#elif defined(__LITTLE_ENDIAN)
1785 u16 __agg_val11;
1786 u16 __agg_val11_th;
1787#endif
1788#if defined(__BIG_ENDIAN)
1789 u8 __reserved1;
1790 u8 __agg_val6_th;
1791 u16 __agg_val9;
1792#elif defined(__LITTLE_ENDIAN)
1793 u16 __agg_val9;
1794 u8 __agg_val6_th;
1795 u8 __reserved1;
1796#endif
1797#if defined(__BIG_ENDIAN)
1798 u16 __agg_val2_th;
1799 u16 __agg_val2;
1800#elif defined(__LITTLE_ENDIAN)
1801 u16 __agg_val2;
1802 u16 __agg_val2_th;
1803#endif
1804 u32 __agg_vars8;
1805#if defined(__BIG_ENDIAN)
1806 u16 __agg_misc0;
1807 u16 __agg_val4;
1808#elif defined(__LITTLE_ENDIAN)
1809 u16 __agg_val4;
1810 u16 __agg_misc0;
1811#endif
1812#if defined(__BIG_ENDIAN)
1813 u8 __agg_val3;
1814 u8 __agg_val6;
1815 u8 __agg_val5_th;
1816 u8 __agg_val5;
1817#elif defined(__LITTLE_ENDIAN)
1818 u8 __agg_val5;
1819 u8 __agg_val5_th;
1820 u8 __agg_val6;
1821 u8 __agg_val3;
1822#endif
1823#if defined(__BIG_ENDIAN)
1824 u16 __agg_misc1;
1825 u16 __bd_ind_max_val;
1826#elif defined(__LITTLE_ENDIAN)
1827 u16 __bd_ind_max_val;
1828 u16 __agg_misc1;
1829#endif
1830 u32 __reserved57;
1831 u32 __agg_misc4;
1832 u32 __agg_misc5;
1833};
1834
1835/*
1836 * The eth extra aggregative context section of Tstorm
1837 */
1838struct tstorm_eth_extra_ag_context_section {
1839 u32 __agg_val1;
1840#if defined(__BIG_ENDIAN)
1841 u8 __tcp_agg_vars2;
1842 u8 __agg_val3;
1843 u16 __agg_val2;
1844#elif defined(__LITTLE_ENDIAN)
1845 u16 __agg_val2;
1846 u8 __agg_val3;
1847 u8 __tcp_agg_vars2;
1848#endif
1849#if defined(__BIG_ENDIAN)
1850 u16 __agg_val5;
1851 u8 __agg_val6;
1852 u8 __tcp_agg_vars3;
1853#elif defined(__LITTLE_ENDIAN)
1854 u8 __tcp_agg_vars3;
1855 u8 __agg_val6;
1856 u16 __agg_val5;
1857#endif
1858 u32 __reserved63;
1859 u32 __reserved64;
1860 u32 __reserved65;
1861 u32 __reserved66;
1862 u32 __reserved67;
1863 u32 __tcp_agg_vars1;
1864 u32 __reserved61;
1865 u32 __reserved62;
1866 u32 __reserved2;
1867};
1868
1869/*
1870 * The eth aggregative context of Tstorm
1871 */
1872struct tstorm_eth_ag_context {
1873#if defined(__BIG_ENDIAN)
1874 u16 __reserved54;
1875 u8 __agg_vars1;
1876 u8 __state;
1877#elif defined(__LITTLE_ENDIAN)
1878 u8 __state;
1879 u8 __agg_vars1;
1880 u16 __reserved54;
1881#endif
1882#if defined(__BIG_ENDIAN)
1883 u16 __agg_val4;
1884 u16 __agg_vars2;
1885#elif defined(__LITTLE_ENDIAN)
1886 u16 __agg_vars2;
1887 u16 __agg_val4;
1888#endif
1889 struct tstorm_eth_extra_ag_context_section __extra_section;
1890};
1891
1892/*
1893 * The eth aggregative context of Cstorm
1894 */
1895struct cstorm_eth_ag_context {
1896 u32 __agg_vars1;
1897#if defined(__BIG_ENDIAN)
1898 u8 __aux1_th;
1899 u8 __aux1_val;
1900 u16 __agg_vars2;
1901#elif defined(__LITTLE_ENDIAN)
1902 u16 __agg_vars2;
1903 u8 __aux1_val;
1904 u8 __aux1_th;
1905#endif
1906 u32 __num_of_treated_packet;
1907 u32 __last_packet_treated;
1908#if defined(__BIG_ENDIAN)
1909 u16 __reserved58;
1910 u16 __reserved57;
1911#elif defined(__LITTLE_ENDIAN)
1912 u16 __reserved57;
1913 u16 __reserved58;
1914#endif
1915#if defined(__BIG_ENDIAN)
1916 u8 __reserved62;
1917 u8 __reserved61;
1918 u8 __reserved60;
1919 u8 __reserved59;
1920#elif defined(__LITTLE_ENDIAN)
1921 u8 __reserved59;
1922 u8 __reserved60;
1923 u8 __reserved61;
1924 u8 __reserved62;
1925#endif
1926#if defined(__BIG_ENDIAN)
1927 u16 __reserved64;
1928 u16 __reserved63;
1929#elif defined(__LITTLE_ENDIAN)
1930 u16 __reserved63;
1931 u16 __reserved64;
1932#endif
1933 u32 __reserved65;
1934#if defined(__BIG_ENDIAN)
1935 u16 __agg_vars3;
1936 u16 __rq_inv_cnt;
1937#elif defined(__LITTLE_ENDIAN)
1938 u16 __rq_inv_cnt;
1939 u16 __agg_vars3;
1940#endif
1941#if defined(__BIG_ENDIAN)
1942 u16 __packet_index_th;
1943 u16 __packet_index;
1944#elif defined(__LITTLE_ENDIAN)
1945 u16 __packet_index;
1946 u16 __packet_index_th;
1947#endif
1948};
1949
1950/*
1951 * The eth aggregative context of Ustorm
1952 */
1953struct ustorm_eth_ag_context {
1954#if defined(__BIG_ENDIAN)
1955 u8 __aux_counter_flags;
1956 u8 __agg_vars2;
1957 u8 __agg_vars1;
1958 u8 __state;
1959#elif defined(__LITTLE_ENDIAN)
1960 u8 __state;
1961 u8 __agg_vars1;
1962 u8 __agg_vars2;
1963 u8 __aux_counter_flags;
1964#endif
1965#if defined(__BIG_ENDIAN)
1966 u8 cdu_usage;
1967 u8 __agg_misc2;
1968 u16 __agg_misc1;
1969#elif defined(__LITTLE_ENDIAN)
1970 u16 __agg_misc1;
1971 u8 __agg_misc2;
1972 u8 cdu_usage;
1973#endif
1974 u32 __agg_misc4;
1975#if defined(__BIG_ENDIAN)
1976 u8 __agg_val3_th;
1977 u8 __agg_val3;
1978 u16 __agg_misc3;
1979#elif defined(__LITTLE_ENDIAN)
1980 u16 __agg_misc3;
1981 u8 __agg_val3;
1982 u8 __agg_val3_th;
1983#endif
1984 u32 __agg_val1;
1985 u32 __agg_misc4_th;
1986#if defined(__BIG_ENDIAN)
1987 u16 __agg_val2_th;
1988 u16 __agg_val2;
1989#elif defined(__LITTLE_ENDIAN)
1990 u16 __agg_val2;
1991 u16 __agg_val2_th;
1992#endif
1993#if defined(__BIG_ENDIAN)
1994 u16 __reserved2;
1995 u8 __decision_rules;
1996 u8 __decision_rule_enable_bits;
1997#elif defined(__LITTLE_ENDIAN)
1998 u8 __decision_rule_enable_bits;
1999 u8 __decision_rules;
2000 u16 __reserved2;
2001#endif
2002};
2003
2004/*
2005 * Timers connection context
2006 */
2007struct timers_block_context {
2008 u32 __reserved_0;
2009 u32 __reserved_1;
2010 u32 __reserved_2;
2011 u32 flags;
2012#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
2013#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
2014#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
2015#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
2016#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
2017#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
2018};
2019
2020/*
2021 * structure for easy accessibility to assembler
2022 */
2023struct eth_tx_bd_flags {
2024 u8 as_bitfield;
2025#define ETH_TX_BD_FLAGS_VLAN_TAG (0x1<<0)
2026#define ETH_TX_BD_FLAGS_VLAN_TAG_SHIFT 0
2027#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<1)
2028#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 1
2029#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<2)
2030#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 2
2031#define ETH_TX_BD_FLAGS_END_BD (0x1<<3)
2032#define ETH_TX_BD_FLAGS_END_BD_SHIFT 3
2033#define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
2034#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
2035#define ETH_TX_BD_FLAGS_HDR_POOL (0x1<<5)
2036#define ETH_TX_BD_FLAGS_HDR_POOL_SHIFT 5
2037#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6)
2038#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
2039#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7)
2040#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7
2041};
2042
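/*
 * Illustrative sketch (not taken from the driver sources): every bit-field in
 * these firmware structures follows the MASK/_SHIFT convention seen above --
 * the mask selects the field inside the containing byte/word and the _SHIFT
 * constant gives its bit offset.  The helper names below are made up purely
 * for illustration.
 */
static inline void example_set_bd_flag(struct eth_tx_bd_flags *flags,
					u8 mask, u8 shift, u8 val)
{
	flags->as_bitfield &= ~mask;
	flags->as_bitfield |= (u8)(val << shift) & mask;
}

static inline u8 example_get_bd_flag(const struct eth_tx_bd_flags *flags,
				     u8 mask, u8 shift)
{
	return (flags->as_bitfield & mask) >> shift;
}

/*
 * e.g. marking a descriptor as the first BD of a packet with L4 checksum:
 *	example_set_bd_flag(&flags, ETH_TX_BD_FLAGS_START_BD,
 *			    ETH_TX_BD_FLAGS_START_BD_SHIFT, 1);
 *	example_set_bd_flag(&flags, ETH_TX_BD_FLAGS_L4_CSUM,
 *			    ETH_TX_BD_FLAGS_L4_CSUM_SHIFT, 1);
 */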
2043/*
2044 * The eth Tx Buffer Descriptor
2045 */
2046struct eth_tx_start_bd {
2047 __le32 addr_lo;
2048 __le32 addr_hi;
2049 __le16 nbd;
2050 __le16 nbytes;
2051 __le16 vlan;
2052 struct eth_tx_bd_flags bd_flags;
2053 u8 general_data;
2054#define ETH_TX_START_BD_HDR_NBDS (0x3F<<0)
2055#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
2056#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6)
2057#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6
2058};
2059
2060/*
2061 * Tx regular BD structure
2062 */
2063struct eth_tx_bd {
2064 u32 addr_lo;
2065 u32 addr_hi;
2066 u16 total_pkt_bytes;
2067 u16 nbytes;
2068 u8 reserved[4];
2069};
2070
2071/*
2072 * Tx parsing BD structure for ETH, relevant in START
2073 */
2074struct eth_tx_parse_bd {
2075 u8 global_data;
2076#define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET (0xF<<0)
2077#define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET_SHIFT 0
2078#define ETH_TX_PARSE_BD_UDP_CS_FLG (0x1<<4)
2079#define ETH_TX_PARSE_BD_UDP_CS_FLG_SHIFT 4
2080#define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
2081#define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
2082#define ETH_TX_PARSE_BD_LLC_SNAP_EN (0x1<<6)
2083#define ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT 6
2084#define ETH_TX_PARSE_BD_NS_FLG (0x1<<7)
2085#define ETH_TX_PARSE_BD_NS_FLG_SHIFT 7
2086 u8 tcp_flags;
2087#define ETH_TX_PARSE_BD_FIN_FLG (0x1<<0)
2088#define ETH_TX_PARSE_BD_FIN_FLG_SHIFT 0
2089#define ETH_TX_PARSE_BD_SYN_FLG (0x1<<1)
2090#define ETH_TX_PARSE_BD_SYN_FLG_SHIFT 1
2091#define ETH_TX_PARSE_BD_RST_FLG (0x1<<2)
2092#define ETH_TX_PARSE_BD_RST_FLG_SHIFT 2
2093#define ETH_TX_PARSE_BD_PSH_FLG (0x1<<3)
2094#define ETH_TX_PARSE_BD_PSH_FLG_SHIFT 3
2095#define ETH_TX_PARSE_BD_ACK_FLG (0x1<<4)
2096#define ETH_TX_PARSE_BD_ACK_FLG_SHIFT 4
2097#define ETH_TX_PARSE_BD_URG_FLG (0x1<<5)
2098#define ETH_TX_PARSE_BD_URG_FLG_SHIFT 5
2099#define ETH_TX_PARSE_BD_ECE_FLG (0x1<<6)
2100#define ETH_TX_PARSE_BD_ECE_FLG_SHIFT 6
2101#define ETH_TX_PARSE_BD_CWR_FLG (0x1<<7)
2102#define ETH_TX_PARSE_BD_CWR_FLG_SHIFT 7
2103 u8 ip_hlen;
2104 s8 reserved;
2105 __le16 total_hlen;
2106 __le16 tcp_pseudo_csum;
2107 __le16 lso_mss;
2108 __le16 ip_id;
2109 __le32 tcp_send_seq;
2110};
2111
2112/*
2113 * The last BD in the BD memory will hold a pointer to the next BD memory
2114 */
2115struct eth_tx_next_bd {
2116 __le32 addr_lo;
2117 __le32 addr_hi;
2118 u8 reserved[8];
2119};
2120
2121/*
2122 * union for 4 Bd types
2123 */
2124union eth_tx_bd_types {
2125 struct eth_tx_start_bd start_bd;
2126 struct eth_tx_bd reg_bd;
2127 struct eth_tx_parse_bd parse_bd;
2128 struct eth_tx_next_bd next_bd;
2129};
2130
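/*
 * Illustrative sketch (simplified, not the driver's transmit path): a frame is
 * described by a short chain of the BD types above within one ring -- a start
 * BD, optionally a parsing BD for checksum/LSO offload, regular BDs for
 * further fragments, and a next-page BD as the last entry of every page.
 * Ring wrap, producer bookkeeping and the parsing-BD contents are omitted;
 * U64_LO()/U64_HI() are assumed to be the driver's helpers for splitting a
 * DMA address.
 */
static void example_fill_tx_bds(union eth_tx_bd_types *ring, u16 prod,
				dma_addr_t mapping, u16 len, u16 vlan)
{
	struct eth_tx_start_bd *start_bd = &ring[prod].start_bd;
	struct eth_tx_parse_bd *parse_bd = &ring[prod + 1].parse_bd;

	start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	start_bd->nbytes = cpu_to_le16(len);
	start_bd->vlan = cpu_to_le16(vlan);
	start_bd->nbd = cpu_to_le16(2);		/* start BD + parsing BD */
	start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD |
					 ETH_TX_BD_FLAGS_L4_CSUM;

	memset(parse_bd, 0, sizeof(*parse_bd));
	/* total_hlen, tcp_pseudo_csum, lso_mss etc. would be filled here */
}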
2131/*
2132 * The eth storm context of Xstorm
2133 */
2134struct xstorm_eth_st_context {
2135 u32 tx_bd_page_base_lo;
2136 u32 tx_bd_page_base_hi;
2137#if defined(__BIG_ENDIAN)
2138 u16 tx_bd_cons;
2139 u8 statistics_data;
2140#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
2141#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
2142#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
2143#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
2144 u8 __local_tx_bd_prod;
2145#elif defined(__LITTLE_ENDIAN)
2146 u8 __local_tx_bd_prod;
2147 u8 statistics_data;
2148#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
2149#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
2150#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
2151#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
2152 u16 tx_bd_cons;
2153#endif
2154 u32 __reserved1;
2155 u32 __reserved2;
2156#if defined(__BIG_ENDIAN)
2157 u8 __ram_cache_index;
2158 u8 __double_buffer_client;
2159 u16 __pkt_cons;
2160#elif defined(__LITTLE_ENDIAN)
2161 u16 __pkt_cons;
2162 u8 __double_buffer_client;
2163 u8 __ram_cache_index;
2164#endif
2165#if defined(__BIG_ENDIAN)
2166 u16 __statistics_address;
2167 u16 __gso_next;
2168#elif defined(__LITTLE_ENDIAN)
2169 u16 __gso_next;
2170 u16 __statistics_address;
2171#endif
2172#if defined(__BIG_ENDIAN)
2173 u8 __local_tx_bd_cons;
2174 u8 safc_group_num;
2175 u8 safc_group_en;
2176 u8 __is_eth_conn;
2177#elif defined(__LITTLE_ENDIAN)
2178 u8 __is_eth_conn;
2179 u8 safc_group_en;
2180 u8 safc_group_num;
2181 u8 __local_tx_bd_cons;
2182#endif
2183 union eth_tx_bd_types __bds[13];
2184};
2185
2186/*
2187 * The eth storm context of Cstorm
2188 */
2189struct cstorm_eth_st_context {
2190#if defined(__BIG_ENDIAN)
2191 u16 __reserved0;
2192 u8 sb_index_number;
2193 u8 status_block_id;
2194#elif defined(__LITTLE_ENDIAN)
2195 u8 status_block_id;
2196 u8 sb_index_number;
2197 u16 __reserved0;
2198#endif
2199 u32 __reserved1[3];
2200};
2201
2202/*
2203 * Ethernet connection context
2204 */
2205struct eth_context {
2206 struct ustorm_eth_st_context ustorm_st_context;
2207 struct tstorm_eth_st_context tstorm_st_context;
2208 struct xstorm_eth_ag_context xstorm_ag_context;
2209 struct tstorm_eth_ag_context tstorm_ag_context;
2210 struct cstorm_eth_ag_context cstorm_ag_context;
2211 struct ustorm_eth_ag_context ustorm_ag_context;
2212 struct timers_block_context timers_context;
2213 struct xstorm_eth_st_context xstorm_st_context;
2214 struct cstorm_eth_st_context cstorm_st_context;
2215};
2216
2217
2218/*
2219 * Ethernet doorbell
2220 */
2221struct eth_tx_doorbell {
2222#if defined(__BIG_ENDIAN)
2223 u16 npackets;
2224 u8 params;
2225#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
2226#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
2227#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
2228#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
2229#define ETH_TX_DOORBELL_SPARE (0x1<<7)
2230#define ETH_TX_DOORBELL_SPARE_SHIFT 7
2231 struct doorbell_hdr hdr;
2232#elif defined(__LITTLE_ENDIAN)
2233 struct doorbell_hdr hdr;
2234 u8 params;
2235#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
2236#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
2237#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
2238#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
2239#define ETH_TX_DOORBELL_SPARE (0x1<<7)
2240#define ETH_TX_DOORBELL_SPARE_SHIFT 7
2241 u16 npackets;
2242#endif
2243};
2244
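/*
 * Illustrative sketch: once the BDs are in place, the producer update is
 * signalled through this doorbell; params carries the BD count in NUM_BDS and
 * npackets the number of packets.  The doorbell_hdr contents and the actual
 * write to the doorbell BAR are chip-specific and intentionally not shown.
 */
static void example_fill_tx_doorbell(struct eth_tx_doorbell *db,
				     u8 nbds, u16 npackets)
{
	memset(db, 0, sizeof(*db));
	db->params |= (u8)(nbds << ETH_TX_DOORBELL_NUM_BDS_SHIFT) &
		      ETH_TX_DOORBELL_NUM_BDS;
	db->npackets = npackets;
}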
2245
2246/*
2247 * cstorm default status block, generated by ustorm
2248 */
2249struct cstorm_def_status_block_u {
2250 __le16 index_values[HC_USTORM_DEF_SB_NUM_INDICES];
2251 __le16 status_block_index;
2252 u8 func;
2253 u8 status_block_id;
2254 __le32 __flags;
2255};
2256
2257/*
2258 * cstorm default status block, generated by cstorm
2259 */
2260struct cstorm_def_status_block_c {
2261 __le16 index_values[HC_CSTORM_DEF_SB_NUM_INDICES];
2262 __le16 status_block_index;
2263 u8 func;
2264 u8 status_block_id;
2265 __le32 __flags;
2266};
2267
2268/*
2269 * xstorm status block
2270 */
2271struct xstorm_def_status_block {
2272 __le16 index_values[HC_XSTORM_DEF_SB_NUM_INDICES];
2273 __le16 status_block_index;
2274 u8 func;
2275 u8 status_block_id;
2276 __le32 __flags;
2277};
2278
2279/*
2280 * tstorm status block
2281 */
2282struct tstorm_def_status_block {
2283 __le16 index_values[HC_TSTORM_DEF_SB_NUM_INDICES];
2284 __le16 status_block_index;
2285 u8 func;
2286 u8 status_block_id;
2287 __le32 __flags;
2288};
2289
2290/*
2291 * host status block
2292 */
2293struct host_def_status_block {
2294 struct atten_def_status_block atten_status_block;
2295 struct cstorm_def_status_block_u u_def_status_block;
2296 struct cstorm_def_status_block_c c_def_status_block;
2297 struct xstorm_def_status_block x_def_status_block;
2298 struct tstorm_def_status_block t_def_status_block;
2299};
2300
2301
2302/*
2303 * cstorm status block, generated by ustorm
2304 */
2305struct cstorm_status_block_u {
2306 __le16 index_values[HC_USTORM_SB_NUM_INDICES];
2307 __le16 status_block_index;
2308 u8 func;
2309 u8 status_block_id;
2310 __le32 __flags;
2311};
2312
2313/*
2314 * cstorm status block, generated by cstorm
2315 */
2316struct cstorm_status_block_c {
2317 __le16 index_values[HC_CSTORM_SB_NUM_INDICES];
2318 __le16 status_block_index;
2319 u8 func;
2320 u8 status_block_id;
2321 __le32 __flags;
2322};
2323
2324/*
2325 * host status block
2326 */
2327struct host_status_block {
2328 struct cstorm_status_block_u u_status_block;
2329 struct cstorm_status_block_c c_status_block;
2330};
2331
2332
2333/*
2334 * The data for the client setup ramrod
2335 */
2336struct eth_client_setup_ramrod_data {
2337 u32 client_id;
2338 u8 is_rdma;
2339 u8 is_fcoe;
2340 u16 reserved1;
2341};
2342
2343
2344/*
2345 * regular eth FP CQE parameters struct
2346 */
2347struct eth_fast_path_rx_cqe {
2348 u8 type_error_flags;
2349#define ETH_FAST_PATH_RX_CQE_TYPE (0x1<<0)
2350#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0
2351#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<1)
2352#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 1
2353#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<2)
2354#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 2
2355#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<3)
2356#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 3
2357#define ETH_FAST_PATH_RX_CQE_START_FLG (0x1<<4)
2358#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4
2359#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5)
2360#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5
2361#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6)
2362#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
2363 u8 status_flags;
2364#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
2365#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
2366#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3)
2367#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3
2368#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4)
2369#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4
2370#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5)
2371#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5
2372#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6)
2373#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6
2374#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7)
2375#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7
2376 u8 placement_offset;
2377 u8 queue_index;
2378 __le32 rss_hash_result;
2379 __le16 vlan_tag;
2380 __le16 pkt_len;
2381 __le16 len_on_bd;
2382 struct parsing_flags pars_flags;
2383 __le16 sgl[8];
2384};
2385
2386
2387/*
2388 * The data for the halt ramrod
2389 */
2390struct eth_halt_ramrod_data {
2391 u32 client_id;
2392 u32 reserved0;
2393};
2394
2395
2396/*
2397 * The data for statistics query ramrod
2398 */
2399struct eth_query_ramrod_data {
2400#if defined(__BIG_ENDIAN)
2401 u8 reserved0;
2402 u8 collect_port;
2403 u16 drv_counter;
2404#elif defined(__LITTLE_ENDIAN)
2405 u16 drv_counter;
2406 u8 collect_port;
2407 u8 reserved0;
2408#endif
2409 u32 ctr_id_vector;
2410};
2411
2412
2413/*
2414 * Place holder for ramrods protocol specific data
2415 */
2416struct ramrod_data {
2417 __le32 data_lo;
2418 __le32 data_hi;
2419};
2420
2421/*
2422 * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
2423 */
2424union eth_ramrod_data {
2425 struct ramrod_data general;
2426};
2427
2428
2429/*
2430 * Eth Rx Cqe structure - general structure for ramrods
2431 */
2432struct common_ramrod_eth_rx_cqe {
2433 u8 ramrod_type;
2434#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x1<<0)
2435#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
2436#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<1)
2437#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 1
2438#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x3F<<2)
2439#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 2
2440 u8 conn_type;
2441 __le16 reserved1;
2442 __le32 conn_and_cmd_data;
2443#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0)
2444#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0
2445#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24)
2446#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24
2447 struct ramrod_data protocol_data;
2448 __le32 reserved2[4];
2449};
2450
2451/*
2452 * Rx Last CQE in page (in ETH)
2453 */
2454struct eth_rx_cqe_next_page {
2455 __le32 addr_lo;
2456 __le32 addr_hi;
2457 __le32 reserved[6];
2458};
2459
2460/*
2461 * union for all eth rx cqe types (fix their sizes)
2462 */
2463union eth_rx_cqe {
2464 struct eth_fast_path_rx_cqe fast_path_cqe;
2465 struct common_ramrod_eth_rx_cqe ramrod_cqe;
2466 struct eth_rx_cqe_next_page next_page_cqe;
2467};
2468
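/*
 * Illustrative sketch (simplified relative to the real RX path): the driver
 * first uses the type bit to tell a fast-path packet completion from a
 * slow-path (ramrod) completion, then inspects the checksum error bits before
 * reporting hardware checksum results.  The return convention below is made
 * up for illustration only.
 */
static int example_classify_rx_cqe(const union eth_rx_cqe *cqe)
{
	u8 flags = cqe->fast_path_cqe.type_error_flags;

	if (flags & ETH_FAST_PATH_RX_CQE_TYPE)
		return 1;	/* slow path: ramrod/next-page entry */

	if (flags & (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
		     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		return -1;	/* bad checksum reported by hardware */

	return 0;		/* good fast-path packet completion */
}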
2469
2470/*
2471 * common data for all protocols
2472 */
2473struct spe_hdr {
2474 __le32 conn_and_cmd_data;
2475#define SPE_HDR_CID (0xFFFFFF<<0)
2476#define SPE_HDR_CID_SHIFT 0
2477#define SPE_HDR_CMD_ID (0xFF<<24)
2478#define SPE_HDR_CMD_ID_SHIFT 24
2479 __le16 type;
2480#define SPE_HDR_CONN_TYPE (0xFF<<0)
2481#define SPE_HDR_CONN_TYPE_SHIFT 0
2482#define SPE_HDR_COMMON_RAMROD (0xFF<<8)
2483#define SPE_HDR_COMMON_RAMROD_SHIFT 8
2484 __le16 reserved;
2485};
2486
2487/*
2488 * Ethernet slow path element
2489 */
2490union eth_specific_data {
2491 u8 protocol_data[8];
2492 struct regpair mac_config_addr;
2493 struct eth_client_setup_ramrod_data client_setup_ramrod_data;
2494 struct eth_halt_ramrod_data halt_ramrod_data;
2495 struct regpair leading_cqe_addr;
2496 struct regpair update_data_addr;
2497 struct eth_query_ramrod_data query_ramrod_data;
2498};
2499
2500/*
2501 * Ethernet slow path element
2502 */
2503struct eth_spe {
2504 struct spe_hdr hdr;
2505 union eth_specific_data data;
2506};
2507
2508
2509/*
2510 * array of 13 bds as appears in the eth xstorm context
2511 */
2512struct eth_tx_bds_array {
2513 union eth_tx_bd_types bds[13];
2514};
2515
2516
2517/*
2518 * Common configuration parameters per function in Tstorm
2519 */
2520struct tstorm_eth_function_common_config {
2521#if defined(__BIG_ENDIAN)
2522 u8 leading_client_id;
2523 u8 rss_result_mask;
2524 u16 config_flags;
2525#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
2526#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
2527#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
2528#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
2529#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
2530#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
2531#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
2532#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
2533#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
2534#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
2535#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7)
2536#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7
2537#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8)
2538#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8
2539#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9)
2540#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9
2541#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
2542#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
2543#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
2544#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
2545#elif defined(__LITTLE_ENDIAN)
2546 u16 config_flags;
2547#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
2548#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
2549#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
2550#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
2551#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
2552#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
2553#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
2554#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
2555#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
2556#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
2557#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7)
2558#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7
2559#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8)
2560#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8
2561#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9)
2562#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9
2563#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
2564#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
2565#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
2566#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
2567 u8 rss_result_mask;
2568 u8 leading_client_id;
2569#endif
2570 u16 vlan_id[2];
2571};
2572
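/*
 * Illustrative sketch: config_flags advertises which hash types the function
 * supports and selects the RSS mode through the _RSS_MODE field.  The numeric
 * mode values come from the firmware interface and are passed in here as an
 * opaque parameter; the function name is for illustration only.
 */
static u16 example_rss_config_flags(u16 rss_mode)
{
	u16 flags = 0;

	flags |= TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY |
		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY |
		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY |
		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY;

	flags |= (u16)(rss_mode << TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT) &
		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE;

	return flags;
}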
2573/*
2574 * RSS indirection table update configuration
2575 */
2576struct rss_update_config {
2577#if defined(__BIG_ENDIAN)
2578 u16 toe_rss_bitmap;
2579 u16 flags;
2580#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0)
2581#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0
2582#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1)
2583#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1
2584#define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2)
2585#define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2
2586#elif defined(__LITTLE_ENDIAN)
2587 u16 flags;
2588#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0)
2589#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0
2590#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1)
2591#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1
2592#define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2)
2593#define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2
2594 u16 toe_rss_bitmap;
2595#endif
2596 u32 reserved1;
2597};
2598
2599/*
2600 * parameters for eth update ramrod
2601 */
2602struct eth_update_ramrod_data {
2603 struct tstorm_eth_function_common_config func_config;
2604 u8 indirectionTable[128];
2605 struct rss_update_config rss_config;
2606};
2607
2608
2609/*
2610 * MAC filtering configuration command header
2611 */
2612struct mac_configuration_hdr {
2613 u8 length;
2614 u8 offset;
2615 u16 client_id;
2616 u32 reserved1;
2617};
2618
2619/*
2620 * MAC address in list for ramrod
2621 */
2622struct tstorm_cam_entry {
2623 __le16 lsb_mac_addr;
2624 __le16 middle_mac_addr;
2625 __le16 msb_mac_addr;
2626 __le16 flags;
2627#define TSTORM_CAM_ENTRY_PORT_ID (0x1<<0)
2628#define TSTORM_CAM_ENTRY_PORT_ID_SHIFT 0
2629#define TSTORM_CAM_ENTRY_RSRVVAL0 (0x7<<1)
2630#define TSTORM_CAM_ENTRY_RSRVVAL0_SHIFT 1
2631#define TSTORM_CAM_ENTRY_RESERVED0 (0xFFF<<4)
2632#define TSTORM_CAM_ENTRY_RESERVED0_SHIFT 4
2633};
2634
2635/*
2636 * MAC filtering: CAM target table entry
2637 */
2638struct tstorm_cam_target_table_entry {
2639 u8 flags;
2640#define TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST (0x1<<0)
2641#define TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST_SHIFT 0
2642#define TSTORM_CAM_TARGET_TABLE_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<1)
2643#define TSTORM_CAM_TARGET_TABLE_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 1
2644#define TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE (0x1<<2)
2645#define TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE_SHIFT 2
2646#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC (0x1<<3)
2647#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC_SHIFT 3
2648#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0 (0xF<<4)
2649#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0_SHIFT 4
2650 u8 reserved1;
2651 u16 vlan_id;
2652 u32 clients_bit_vector;
2653};
2654
2655/*
2656 * MAC address in list for ramrod
2657 */
2658struct mac_configuration_entry {
2659 struct tstorm_cam_entry cam_entry;
2660 struct tstorm_cam_target_table_entry target_table_entry;
2661};
2662
2663/*
2664 * MAC filtering configuration command
2665 */
2666struct mac_configuration_cmd {
2667 struct mac_configuration_hdr hdr;
2668 struct mac_configuration_entry config_table[64];
2669};
2670
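/*
 * Illustrative sketch: each entry pairs a CAM line holding the MAC address
 * (three 16-bit words, two address bytes per word) with a target-table line
 * saying which client(s) a match is steered to.  The byte packing shown here
 * is the conventional high-byte-first layout; the authoritative ordering and
 * the ramrod plumbing live in bnx2x_main.c and are not reproduced here.
 */
static void example_fill_cam_entry(struct mac_configuration_entry *entry,
				   const u8 *mac, u32 clients, u16 vlan_id)
{
	entry->cam_entry.msb_mac_addr = cpu_to_le16((mac[0] << 8) | mac[1]);
	entry->cam_entry.middle_mac_addr = cpu_to_le16((mac[2] << 8) | mac[3]);
	entry->cam_entry.lsb_mac_addr = cpu_to_le16((mac[4] << 8) | mac[5]);
	entry->cam_entry.flags = cpu_to_le16(0);	/* port 0, no reserved bits */

	entry->target_table_entry.flags = 0;
	entry->target_table_entry.vlan_id = vlan_id;
	entry->target_table_entry.clients_bit_vector = clients;
}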
2671
2672/*
2673 * MAC address in list for ramrod
2674 */
2675struct mac_configuration_entry_e1h {
2676 __le16 lsb_mac_addr;
2677 __le16 middle_mac_addr;
2678 __le16 msb_mac_addr;
2679 __le16 vlan_id;
2680 __le16 e1hov_id;
2681 u8 reserved0;
2682 u8 flags;
2683#define MAC_CONFIGURATION_ENTRY_E1H_PORT (0x1<<0)
2684#define MAC_CONFIGURATION_ENTRY_E1H_PORT_SHIFT 0
2685#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE (0x1<<1)
2686#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE_SHIFT 1
2687#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC (0x1<<2)
2688#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC_SHIFT 2
2689#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1 (0x1F<<3)
2690#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1_SHIFT 3
2691 u32 clients_bit_vector;
2692};
2693
2694/*
2695 * MAC filtering configuration command
2696 */
2697struct mac_configuration_cmd_e1h {
2698 struct mac_configuration_hdr hdr;
2699 struct mac_configuration_entry_e1h config_table[32];
2700};
2701
2702
2703/*
2704 * approximate-match multicast filtering for E1H per function in Tstorm
2705 */
2706struct tstorm_eth_approximate_match_multicast_filtering {
2707 u32 mcast_add_hash_bit_array[8];
2708};
2709
2710
2711/*
2712 * Configuration parameters per client in Tstorm
2713 */
2714struct tstorm_eth_client_config {
2715#if defined(__BIG_ENDIAN)
2716 u8 reserved0;
2717 u8 statistics_counter_id;
2718 u16 mtu;
2719#elif defined(__LITTLE_ENDIAN)
2720 u16 mtu;
2721 u8 statistics_counter_id;
2722 u8 reserved0;
2723#endif
2724#if defined(__BIG_ENDIAN)
2725 u16 drop_flags;
2726#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
2727#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
2728#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
2729#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
2730#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
2731#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
2732#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
2733#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
2734#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
2735#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
2736 u16 config_flags;
2737#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
2738#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
2739#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
2740#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
2741#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
2742#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
2743#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
2744#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
2745#elif defined(__LITTLE_ENDIAN)
2746 u16 config_flags;
2747#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
2748#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
2749#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
2750#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
2751#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
2752#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
2753#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
2754#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
2755 u16 drop_flags;
2756#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
2757#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
2758#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
2759#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
2760#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
2761#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
2762#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
2763#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
2764#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
2765#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
2766#endif
2767};
2768
2769
2770/*
2771 * MAC filtering configuration parameters per port in Tstorm
2772 */
2773struct tstorm_eth_mac_filter_config {
2774 u32 ucast_drop_all;
2775 u32 ucast_accept_all;
2776 u32 mcast_drop_all;
2777 u32 mcast_accept_all;
2778 u32 bcast_drop_all;
2779 u32 bcast_accept_all;
2780 u32 strict_vlan;
2781 u32 vlan_filter[2];
2782 u32 reserved;
2783};
2784
2785
2786/*
2787 * common flag to indicate existence of TPA.
2788 */
2789struct tstorm_eth_tpa_exist {
2790#if defined(__BIG_ENDIAN)
2791 u16 reserved1;
2792 u8 reserved0;
2793 u8 tpa_exist;
2794#elif defined(__LITTLE_ENDIAN)
2795 u8 tpa_exist;
2796 u8 reserved0;
2797 u16 reserved1;
2798#endif
2799 u32 reserved2;
2800};
2801
2802
2803/*
2804 * rx rings pause data for E1h only
2805 */
2806struct ustorm_eth_rx_pause_data_e1h {
2807#if defined(__BIG_ENDIAN)
2808 u16 bd_thr_low;
2809 u16 cqe_thr_low;
2810#elif defined(__LITTLE_ENDIAN)
2811 u16 cqe_thr_low;
2812 u16 bd_thr_low;
2813#endif
2814#if defined(__BIG_ENDIAN)
2815 u16 cos;
2816 u16 sge_thr_low;
2817#elif defined(__LITTLE_ENDIAN)
2818 u16 sge_thr_low;
2819 u16 cos;
2820#endif
2821#if defined(__BIG_ENDIAN)
2822 u16 bd_thr_high;
2823 u16 cqe_thr_high;
2824#elif defined(__LITTLE_ENDIAN)
2825 u16 cqe_thr_high;
2826 u16 bd_thr_high;
2827#endif
2828#if defined(__BIG_ENDIAN)
2829 u16 reserved0;
2830 u16 sge_thr_high;
2831#elif defined(__LITTLE_ENDIAN)
2832 u16 sge_thr_high;
2833 u16 reserved0;
2834#endif
2835};
2836
2837
2838/*
2839 * Three RX producers for ETH
2840 */
2841struct ustorm_eth_rx_producers {
2842#if defined(__BIG_ENDIAN)
2843 u16 bd_prod;
2844 u16 cqe_prod;
2845#elif defined(__LITTLE_ENDIAN)
2846 u16 cqe_prod;
2847 u16 bd_prod;
2848#endif
2849#if defined(__BIG_ENDIAN)
2850 u16 reserved;
2851 u16 sge_prod;
2852#elif defined(__LITTLE_ENDIAN)
2853 u16 sge_prod;
2854 u16 reserved;
2855#endif
2856};
2857
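/*
 * Illustrative sketch: the three producers are mirrored into Ustorm internal
 * memory so the firmware knows how far the driver has replenished the BD, CQE
 * and SGE rings.  BAR_USTRORM_INTMEM and USTORM_RX_PRODS_OFFSET() are assumed
 * to be the offsets defined elsewhere in the driver headers; the real update
 * routine lives in bnx2x_main.c.
 */
static void example_update_rx_prods(struct bnx2x *bp, int port, int cl_id,
				    u16 bd_prod, u16 cqe_prod, u16 sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = cqe_prod;
	rx_prods.sge_prod = sge_prod;

	for (i = 0; i < sizeof(rx_prods) / 4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(port, cl_id) + i * 4,
		       ((u32 *)&rx_prods)[i]);
}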
2858
2859/*
2860 * per-port SAFC demo variables
2861 */
2862struct cmng_flags_per_port {
2863 u8 con_number[NUM_OF_PROTOCOLS];
2864 u32 cmng_enables;
2865#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0)
2866#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0
2867#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1)
2868#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1
2869#define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL (0x1<<2)
2870#define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL_SHIFT 2
2871#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL (0x1<<3)
2872#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3
2873#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4)
2874#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4
2875#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x7FFFFFF<<5)
2876#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 5
2877};
2878
2879
2880/*
2881 * per-port rate shaping variables
2882 */
2883struct rate_shaping_vars_per_port {
2884 u32 rs_periodic_timeout;
2885 u32 rs_threshold;
2886};
2887
2888/*
2889 * per-port fairness variables
2890 */
2891struct fairness_vars_per_port {
2892 u32 upper_bound;
2893 u32 fair_threshold;
2894 u32 fairness_timeout;
2895};
2896
2897/*
2898 * per-port SAFC variables
2899 */
2900struct safc_struct_per_port {
2901#if defined(__BIG_ENDIAN)
2902 u16 __reserved1;
2903 u8 __reserved0;
2904 u8 safc_timeout_usec;
2905#elif defined(__LITTLE_ENDIAN)
2906 u8 safc_timeout_usec;
2907 u8 __reserved0;
2908 u16 __reserved1;
2909#endif
2910 u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
2911};
2912
2913/*
2914 * Per-port congestion management variables
2915 */
2916struct cmng_struct_per_port {
2917 struct rate_shaping_vars_per_port rs_vars;
2918 struct fairness_vars_per_port fair_vars;
2919 struct safc_struct_per_port safc_vars;
2920 struct cmng_flags_per_port flags;
2921};
2922
2923
2924/*
2925 * Dynamic host coalescing init parameters
2926 */
2927struct dynamic_hc_config {
2928 u32 threshold[3];
2929 u8 shift_per_protocol[HC_USTORM_SB_NUM_INDICES];
2930 u8 hc_timeout0[HC_USTORM_SB_NUM_INDICES];
2931 u8 hc_timeout1[HC_USTORM_SB_NUM_INDICES];
2932 u8 hc_timeout2[HC_USTORM_SB_NUM_INDICES];
2933 u8 hc_timeout3[HC_USTORM_SB_NUM_INDICES];
2934};
2935
2936
2937/*
2938 * Protocol-common statistics collected by the Xstorm (per client)
2939 */
2940struct xstorm_per_client_stats {
2941 __le32 reserved0;
2942 __le32 unicast_pkts_sent;
2943 struct regpair unicast_bytes_sent;
2944 struct regpair multicast_bytes_sent;
2945 __le32 multicast_pkts_sent;
2946 __le32 broadcast_pkts_sent;
2947 struct regpair broadcast_bytes_sent;
2948 __le16 stats_counter;
2949 __le16 reserved1;
2950 __le32 reserved2;
2951};
2952
2953/*
2954 * Common statistics collected by the Xstorm (per port)
2955 */
2956struct xstorm_common_stats {
2957 struct xstorm_per_client_stats client_statistics[MAX_X_STAT_COUNTER_ID];
2958};
2959
2960/*
2961 * Protocol-common statistics collected by the Tstorm (per port)
2962 */
2963struct tstorm_per_port_stats {
2964 __le32 mac_filter_discard;
2965 __le32 xxoverflow_discard;
2966 __le32 brb_truncate_discard;
2967 __le32 mac_discard;
2968};
2969
2970/*
2971 * Protocol-common statistics collected by the Tstorm (per client)
2972 */
2973struct tstorm_per_client_stats {
2974 struct regpair rcv_unicast_bytes;
2975 struct regpair rcv_broadcast_bytes;
2976 struct regpair rcv_multicast_bytes;
2977 struct regpair rcv_error_bytes;
2978 __le32 checksum_discard;
2979 __le32 packets_too_big_discard;
2980 __le32 rcv_unicast_pkts;
2981 __le32 rcv_broadcast_pkts;
2982 __le32 rcv_multicast_pkts;
2983 __le32 no_buff_discard;
2984 __le32 ttl0_discard;
2985 __le16 stats_counter;
2986 __le16 reserved0;
2987};
2988
2989/*
2990 * Protocol-common statistics collected by the Tstorm
2991 */
2992struct tstorm_common_stats {
2993 struct tstorm_per_port_stats port_statistics;
2994 struct tstorm_per_client_stats client_statistics[MAX_T_STAT_COUNTER_ID];
2995};
2996
2997/*
2998 * Protocol-common statistics collected by the Ustorm (per client)
2999 */
3000struct ustorm_per_client_stats {
3001 struct regpair ucast_no_buff_bytes;
3002 struct regpair mcast_no_buff_bytes;
3003 struct regpair bcast_no_buff_bytes;
3004 __le32 ucast_no_buff_pkts;
3005 __le32 mcast_no_buff_pkts;
3006 __le32 bcast_no_buff_pkts;
3007 __le16 stats_counter;
3008 __le16 reserved0;
3009};
3010
3011/*
3012 * Protocol-common statistics collected by the Ustorm
3013 */
3014struct ustorm_common_stats {
3015 struct ustorm_per_client_stats client_statistics[MAX_U_STAT_COUNTER_ID];
3016};
3017
3018/*
3019 * Eth statistics query structure for the eth_stats_query ramrod
3020 */
3021struct eth_stats_query {
3022 struct xstorm_common_stats xstorm_common;
3023 struct tstorm_common_stats tstorm_common;
3024 struct ustorm_common_stats ustorm_common;
3025};
3026
3027
3028/*
3029 * per-vnic fairness variables
3030 */
3031struct fairness_vars_per_vn {
3032 u32 cos_credit_delta[MAX_COS_NUMBER];
3033 u32 protocol_credit_delta[NUM_OF_PROTOCOLS];
3034 u32 vn_credit_delta;
3035 u32 __reserved0;
3036};
3037
3038
3039/*
3040 * FW version stored in the Xstorm RAM
3041 */
3042struct fw_version {
3043#if defined(__BIG_ENDIAN)
3044 u8 engineering;
3045 u8 revision;
3046 u8 minor;
3047 u8 major;
3048#elif defined(__LITTLE_ENDIAN)
3049 u8 major;
3050 u8 minor;
3051 u8 revision;
3052 u8 engineering;
3053#endif
3054 u32 flags;
3055#define FW_VERSION_OPTIMIZED (0x1<<0)
3056#define FW_VERSION_OPTIMIZED_SHIFT 0
3057#define FW_VERSION_BIG_ENDIEN (0x1<<1)
3058#define FW_VERSION_BIG_ENDIEN_SHIFT 1
3059#define FW_VERSION_CHIP_VERSION (0x3<<2)
3060#define FW_VERSION_CHIP_VERSION_SHIFT 2
3061#define __FW_VERSION_RESERVED (0xFFFFFFF<<4)
3062#define __FW_VERSION_RESERVED_SHIFT 4
3063};
3064
3065
3066/*
3067 * FW version stored in first line of pram
3068 */
3069struct pram_fw_version {
3070 u8 major;
3071 u8 minor;
3072 u8 revision;
3073 u8 engineering;
3074 u8 flags;
3075#define PRAM_FW_VERSION_OPTIMIZED (0x1<<0)
3076#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0
3077#define PRAM_FW_VERSION_STORM_ID (0x3<<1)
3078#define PRAM_FW_VERSION_STORM_ID_SHIFT 1
3079#define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3)
3080#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3
3081#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4)
3082#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4
3083#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6)
3084#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6
3085};
3086
3087
3088/*
3089 * The send queue element
3090 */
3091struct protocol_common_spe {
3092 struct spe_hdr hdr;
3093 struct regpair phy_address;
3094};
3095
3096
3097/*
3098 * a single rate shaping counter; can be used as a protocol or vnic counter
3099 */
3100struct rate_shaping_counter {
3101 u32 quota;
3102#if defined(__BIG_ENDIAN)
3103 u16 __reserved0;
3104 u16 rate;
3105#elif defined(__LITTLE_ENDIAN)
3106 u16 rate;
3107 u16 __reserved0;
3108#endif
3109};
3110
3111
3112/*
3113 * per-vnic rate shaping variables
3114 */
3115struct rate_shaping_vars_per_vn {
3116 struct rate_shaping_counter protocol_counters[NUM_OF_PROTOCOLS];
3117 struct rate_shaping_counter vn_counter;
3118};
3119
3120
3121/*
3122 * The send queue element
3123 */
3124struct slow_path_element {
3125 struct spe_hdr hdr;
3126 u8 protocol_data[8];
3127};
3128
3129
3130/*
3131 * eth/toe flags that indicate whether to query
3132 */
3133struct stats_indication_flags {
3134 u32 collect_eth;
3135 u32 collect_toe;
3136};
3137
3138
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
new file mode 100644
index 000000000000..65b26cbfe3e7
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -0,0 +1,152 @@
1/* bnx2x_init.h: Broadcom Everest network driver.
2 * Structures and macros needed during the initialization.
3 *
4 * Copyright (c) 2007-2009 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
11 * Written by: Eliezer Tamir
12 * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
13 */
14
15#ifndef BNX2X_INIT_H
16#define BNX2X_INIT_H
17
18/* RAM0 size in bytes */
19#define STORM_INTMEM_SIZE_E1 0x5800
20#define STORM_INTMEM_SIZE_E1H 0x10000
21#define STORM_INTMEM_SIZE(bp) ((CHIP_IS_E1(bp) ? STORM_INTMEM_SIZE_E1 : \
22 STORM_INTMEM_SIZE_E1H) / 4)
23
24
25/* Init operation types and structures */
26/* Common for both E1 and E1H */
27#define OP_RD 0x1 /* read single register */
28#define OP_WR 0x2 /* write single register */
29#define OP_IW 0x3 /* write single register using mailbox */
30#define OP_SW 0x4 /* copy a string to the device */
31#define OP_SI 0x5 /* copy a string using mailbox */
32#define OP_ZR 0x6 /* clear memory */
33#define OP_ZP 0x7 /* unzip then copy with DMAE */
34#define OP_WR_64 0x8 /* write 64 bit pattern */
35#define OP_WB 0x9 /* copy a string using DMAE */
36
37/* FPGA and EMUL specific operations */
38#define OP_WR_EMUL 0xa /* write single register on Emulation */
39#define OP_WR_FPGA 0xb /* write single register on FPGA */
40#define OP_WR_ASIC 0xc /* write single register on ASIC */
41
42/* Init stages */
43/* Never reorder stages !!! */
44#define COMMON_STAGE 0
45#define PORT0_STAGE 1
46#define PORT1_STAGE 2
47#define FUNC0_STAGE 3
48#define FUNC1_STAGE 4
49#define FUNC2_STAGE 5
50#define FUNC3_STAGE 6
51#define FUNC4_STAGE 7
52#define FUNC5_STAGE 8
53#define FUNC6_STAGE 9
54#define FUNC7_STAGE 10
55#define STAGE_IDX_MAX 11
56
57#define STAGE_START 0
58#define STAGE_END 1
59
60
61/* Indices of blocks */
62#define PRS_BLOCK 0
63#define SRCH_BLOCK 1
64#define TSDM_BLOCK 2
65#define TCM_BLOCK 3
66#define BRB1_BLOCK 4
67#define TSEM_BLOCK 5
68#define PXPCS_BLOCK 6
69#define EMAC0_BLOCK 7
70#define EMAC1_BLOCK 8
71#define DBU_BLOCK 9
72#define MISC_BLOCK 10
73#define DBG_BLOCK 11
74#define NIG_BLOCK 12
75#define MCP_BLOCK 13
76#define UPB_BLOCK 14
77#define CSDM_BLOCK 15
78#define USDM_BLOCK 16
79#define CCM_BLOCK 17
80#define UCM_BLOCK 18
81#define USEM_BLOCK 19
82#define CSEM_BLOCK 20
83#define XPB_BLOCK 21
84#define DQ_BLOCK 22
85#define TIMERS_BLOCK 23
86#define XSDM_BLOCK 24
87#define QM_BLOCK 25
88#define PBF_BLOCK 26
89#define XCM_BLOCK 27
90#define XSEM_BLOCK 28
91#define CDU_BLOCK 29
92#define DMAE_BLOCK 30
93#define PXP_BLOCK 31
94#define CFC_BLOCK 32
95#define HC_BLOCK 33
96#define PXP2_BLOCK 34
97#define MISC_AEU_BLOCK 35
98#define PGLUE_B_BLOCK 36
99#define IGU_BLOCK 37
100
101
102/* Returns the index of start or end of a specific block stage in ops array*/
103#define BLOCK_OPS_IDX(block, stage, end) \
104 (2*(((block)*STAGE_IDX_MAX) + (stage)) + (end))
105
106
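/*
 * Illustrative example: the init ops array stores, per block, STAGE_IDX_MAX
 * (start, end) index pairs, which BLOCK_OPS_IDX() linearizes.  For instance,
 * the end index of the PRS block in the PORT0 stage is
 *	2 * ((PRS_BLOCK * STAGE_IDX_MAX) + PORT0_STAGE) + STAGE_END
 *	= 2 * ((0 * 11) + 1) + 1 = 3.
 * The helper below just wraps that lookup; INIT_OPS_OFFSETS() is assumed to be
 * the accessor used by the init code in bnx2x_init_ops.h.
 */
static inline void example_block_op_range(struct bnx2x *bp, u32 block,
					  u32 stage, u16 *start, u16 *end)
{
	*start = INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_START)];
	*end = INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)];
}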
107struct raw_op {
108 u32 op:8;
109 u32 offset:24;
110 u32 raw_data;
111};
112
113struct op_read {
114 u32 op:8;
115 u32 offset:24;
116 u32 pad;
117};
118
119struct op_write {
120 u32 op:8;
121 u32 offset:24;
122 u32 val;
123};
124
125struct op_string_write {
126 u32 op:8;
127 u32 offset:24;
128#ifdef __LITTLE_ENDIAN
129 u16 data_off;
130 u16 data_len;
131#else /* __BIG_ENDIAN */
132 u16 data_len;
133 u16 data_off;
134#endif
135};
136
137struct op_zero {
138 u32 op:8;
139 u32 offset:24;
140 u32 len;
141};
142
143union init_op {
144 struct op_read read;
145 struct op_write write;
146 struct op_string_write str_wr;
147 struct op_zero zero;
148 struct raw_op raw;
149};
150
151#endif /* BNX2X_INIT_H */
152
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
new file mode 100644
index 000000000000..2b1363a6fe78
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -0,0 +1,506 @@
1/* bnx2x_init_ops.h: Broadcom Everest network driver.
2 * Static functions needed during the initialization.
3 * This file is "included" in bnx2x_main.c.
4 *
5 * Copyright (c) 2007-2010 Broadcom Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
12 * Written by: Vladislav Zolotarov <vladz@broadcom.com>
13 */
14
15#ifndef BNX2X_INIT_OPS_H
16#define BNX2X_INIT_OPS_H
17
18static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
19
20
21static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
22 u32 len)
23{
24 u32 i;
25
26 for (i = 0; i < len; i++)
27 REG_WR(bp, addr + i*4, data[i]);
28}
29
30static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
31 u32 len)
32{
33 u32 i;
34
35 for (i = 0; i < len; i++)
36 REG_WR_IND(bp, addr + i*4, data[i]);
37}
38
39static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
40{
41 if (bp->dmae_ready)
42 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
43 else
44 bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
45}
46
47static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
48{
49 u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
50 u32 buf_len32 = buf_len/4;
51 u32 i;
52
53 memset(GUNZIP_BUF(bp), (u8)fill, buf_len);
54
55 for (i = 0; i < len; i += buf_len32) {
56 u32 cur_len = min(buf_len32, len - i);
57
58 bnx2x_write_big_buf(bp, addr + i*4, cur_len);
59 }
60}
61
62static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
63 u32 len64)
64{
65 u32 buf_len32 = FW_BUF_SIZE/4;
66 u32 len = len64*2;
67 u64 data64 = 0;
68 u32 i;
69
70 /* 64 bit value is in a blob: first low DWORD, then high DWORD */
71 data64 = HILO_U64((*(data + 1)), (*data));
72
73 len64 = min((u32)(FW_BUF_SIZE/8), len64);
74 for (i = 0; i < len64; i++) {
75 u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i;
76
77 *pdata = data64;
78 }
79
80 for (i = 0; i < len; i += buf_len32) {
81 u32 cur_len = min(buf_len32, len - i);
82
83 bnx2x_write_big_buf(bp, addr + i*4, cur_len);
84 }
85}
86
87/*********************************************************
88 There are different blobs for each PRAM section.
89 In addition, each blob write operation is divided into a few operations
90 in order to decrease the amount of phys. contiguous buffer needed.
91 Thus, when we select a blob the address may have some offset
92 from the beginning of PRAM section.
93 The same holds for the INT_TABLE sections.
94**********************************************************/
95#define IF_IS_INT_TABLE_ADDR(base, addr) \
96 if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
97
98#define IF_IS_PRAM_ADDR(base, addr) \
99 if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
100
101static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data)
102{
103 IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
104 data = INIT_TSEM_INT_TABLE_DATA(bp);
105 else
106 IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
107 data = INIT_CSEM_INT_TABLE_DATA(bp);
108 else
109 IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
110 data = INIT_USEM_INT_TABLE_DATA(bp);
111 else
112 IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
113 data = INIT_XSEM_INT_TABLE_DATA(bp);
114 else
115 IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
116 data = INIT_TSEM_PRAM_DATA(bp);
117 else
118 IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
119 data = INIT_CSEM_PRAM_DATA(bp);
120 else
121 IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
122 data = INIT_USEM_PRAM_DATA(bp);
123 else
124 IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
125 data = INIT_XSEM_PRAM_DATA(bp);
126
127 return data;
128}
129
130static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
131{
132 if (bp->dmae_ready)
133 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
134 else
135 bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
136}
137
138static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
139 u32 len)
140{
141 const u32 *old_data = data;
142
143 data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data);
144
145 if (bp->dmae_ready) {
146 if (old_data != data)
147 VIRT_WR_DMAE_LEN(bp, data, addr, len, 1);
148 else
149 VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
150 } else
151 bnx2x_init_ind_wr(bp, addr, data, len);
152}
153
154static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
155{
156 const u8 *data = NULL;
157 int rc;
158 u32 i;
159
160 data = bnx2x_sel_blob(bp, addr, data) + blob_off*4;
161
162 rc = bnx2x_gunzip(bp, data, len);
163 if (rc)
164 return;
165
166 /* gunzip_outlen is in dwords */
167 len = GUNZIP_OUTLEN(bp);
168 for (i = 0; i < len; i++)
169 ((u32 *)GUNZIP_BUF(bp))[i] =
170 cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
171
172 bnx2x_write_big_buf_wb(bp, addr, len);
173}
174
175static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
176{
177 u16 op_start =
178 INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_START)];
179 u16 op_end =
180 INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)];
181 union init_op *op;
182 int hw_wr;
183 u32 i, op_type, addr, len;
184 const u32 *data, *data_base;
185
186 /* If empty block */
187 if (op_start == op_end)
188 return;
189
190 if (CHIP_REV_IS_FPGA(bp))
191 hw_wr = OP_WR_FPGA;
192 else if (CHIP_REV_IS_EMUL(bp))
193 hw_wr = OP_WR_EMUL;
194 else
195 hw_wr = OP_WR_ASIC;
196
197 data_base = INIT_DATA(bp);
198
199 for (i = op_start; i < op_end; i++) {
200
201 op = (union init_op *)&(INIT_OPS(bp)[i]);
202
203 op_type = op->str_wr.op;
204 addr = op->str_wr.offset;
205 len = op->str_wr.data_len;
206 data = data_base + op->str_wr.data_off;
207
208 /* HW/EMUL specific */
209 if ((op_type > OP_WB) && (op_type == hw_wr))
210 op_type = OP_WR;
211
212 switch (op_type) {
213 case OP_RD:
214 REG_RD(bp, addr);
215 break;
216 case OP_WR:
217 REG_WR(bp, addr, op->write.val);
218 break;
219 case OP_SW:
220 bnx2x_init_str_wr(bp, addr, data, len);
221 break;
222 case OP_WB:
223 bnx2x_init_wr_wb(bp, addr, data, len);
224 break;
225 case OP_SI:
226 bnx2x_init_ind_wr(bp, addr, data, len);
227 break;
228 case OP_ZR:
229 bnx2x_init_fill(bp, addr, 0, op->zero.len);
230 break;
231 case OP_ZP:
232 bnx2x_init_wr_zp(bp, addr, len,
233 op->str_wr.data_off);
234 break;
235 case OP_WR_64:
236 bnx2x_init_wr_64(bp, addr, data, len);
237 break;
238 default:
239 /* happens whenever an op is of a diff HW */
240 break;
241 }
242 }
243}
244
245
246/****************************************************************************
247* PXP Arbiter
248****************************************************************************/
249/*
250 * This code configures the PCI read/write arbiter
251 * which implements a weighted round robin
252 * between the virtual queues in the chip.
253 *
254 * The values were derived for each PCI max payload and max request size.
255 * Since max payload and max request size are only known at run time,
256 * this is done as a separate init stage.
257 */
258
259#define NUM_WR_Q 13
260#define NUM_RD_Q 29
261#define MAX_RD_ORD 3
262#define MAX_WR_ORD 2
263
264/* configuration for one arbiter queue */
265struct arb_line {
266 int l;
267 int add;
268 int ubound;
269};
270
271/* derived configuration for each read queue for each max request size */
272static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
273/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
274 { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
275 { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
276 { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
277 { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
278 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
279 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
280 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
281 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
282/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
283 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
284 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
285 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
286 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
287 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
288 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
289 { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
290 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
291 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
292/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
293 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
294 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
295 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
296 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
297 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
298 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
299 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
300 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
301 { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
302};
303
304/* derived configuration for each write queue for each max request size */
305static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
306/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
307 { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
308 { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
309 { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
310 { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
311 { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
312 { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
313 { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
314 { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
315/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
316 { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
317 { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
318 { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
319};
320
321/* register addresses for read queues */
322static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
323/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
324 PXP2_REG_RQ_BW_RD_UBOUND0},
325 {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
326 PXP2_REG_PSWRQ_BW_UB1},
327 {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
328 PXP2_REG_PSWRQ_BW_UB2},
329 {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
330 PXP2_REG_PSWRQ_BW_UB3},
331 {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
332 PXP2_REG_RQ_BW_RD_UBOUND4},
333 {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
334 PXP2_REG_RQ_BW_RD_UBOUND5},
335 {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
336 PXP2_REG_PSWRQ_BW_UB6},
337 {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
338 PXP2_REG_PSWRQ_BW_UB7},
339 {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
340 PXP2_REG_PSWRQ_BW_UB8},
341/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
342 PXP2_REG_PSWRQ_BW_UB9},
343 {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
344 PXP2_REG_PSWRQ_BW_UB10},
345 {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
346 PXP2_REG_PSWRQ_BW_UB11},
347 {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
348 PXP2_REG_RQ_BW_RD_UBOUND12},
349 {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
350 PXP2_REG_RQ_BW_RD_UBOUND13},
351 {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
352 PXP2_REG_RQ_BW_RD_UBOUND14},
353 {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
354 PXP2_REG_RQ_BW_RD_UBOUND15},
355 {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
356 PXP2_REG_RQ_BW_RD_UBOUND16},
357 {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
358 PXP2_REG_RQ_BW_RD_UBOUND17},
359 {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
360 PXP2_REG_RQ_BW_RD_UBOUND18},
361/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
362 PXP2_REG_RQ_BW_RD_UBOUND19},
363 {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
364 PXP2_REG_RQ_BW_RD_UBOUND20},
365 {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
366 PXP2_REG_RQ_BW_RD_UBOUND22},
367 {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
368 PXP2_REG_RQ_BW_RD_UBOUND23},
369 {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
370 PXP2_REG_RQ_BW_RD_UBOUND24},
371 {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
372 PXP2_REG_RQ_BW_RD_UBOUND25},
373 {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
374 PXP2_REG_RQ_BW_RD_UBOUND26},
375 {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
376 PXP2_REG_RQ_BW_RD_UBOUND27},
377 {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
378 PXP2_REG_PSWRQ_BW_UB28}
379};
380
381/* register addresses for write queues */
382static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
383/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
384 PXP2_REG_PSWRQ_BW_UB1},
385 {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
386 PXP2_REG_PSWRQ_BW_UB2},
387 {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
388 PXP2_REG_PSWRQ_BW_UB3},
389 {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
390 PXP2_REG_PSWRQ_BW_UB6},
391 {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
392 PXP2_REG_PSWRQ_BW_UB7},
393 {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
394 PXP2_REG_PSWRQ_BW_UB8},
395 {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
396 PXP2_REG_PSWRQ_BW_UB9},
397 {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
398 PXP2_REG_PSWRQ_BW_UB10},
399 {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
400 PXP2_REG_PSWRQ_BW_UB11},
401/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
402 PXP2_REG_PSWRQ_BW_UB28},
403 {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
404 PXP2_REG_RQ_BW_WR_UBOUND29},
405 {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
406 PXP2_REG_RQ_BW_WR_UBOUND30}
407};
408
409static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
410{
411 u32 val, i;
412
413 if (r_order > MAX_RD_ORD) {
414 DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
415 r_order, MAX_RD_ORD);
416 r_order = MAX_RD_ORD;
417 }
418 if (w_order > MAX_WR_ORD) {
419 DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
420 w_order, MAX_WR_ORD);
421 w_order = MAX_WR_ORD;
422 }
423 if (CHIP_REV_IS_FPGA(bp)) {
424 DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
425 w_order = 0;
426 }
427 DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);
428
429 for (i = 0; i < NUM_RD_Q-1; i++) {
430 REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
431 REG_WR(bp, read_arb_addr[i].add,
432 read_arb_data[i][r_order].add);
433 REG_WR(bp, read_arb_addr[i].ubound,
434 read_arb_data[i][r_order].ubound);
435 }
436
437 for (i = 0; i < NUM_WR_Q-1; i++) {
438 if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
439 (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
440
441 REG_WR(bp, write_arb_addr[i].l,
442 write_arb_data[i][w_order].l);
443
444 REG_WR(bp, write_arb_addr[i].add,
445 write_arb_data[i][w_order].add);
446
447 REG_WR(bp, write_arb_addr[i].ubound,
448 write_arb_data[i][w_order].ubound);
449 } else {
450
451 val = REG_RD(bp, write_arb_addr[i].l);
452 REG_WR(bp, write_arb_addr[i].l,
453 val | (write_arb_data[i][w_order].l << 10));
454
455 val = REG_RD(bp, write_arb_addr[i].add);
456 REG_WR(bp, write_arb_addr[i].add,
457 val | (write_arb_data[i][w_order].add << 10));
458
459 val = REG_RD(bp, write_arb_addr[i].ubound);
460 REG_WR(bp, write_arb_addr[i].ubound,
461 val | (write_arb_data[i][w_order].ubound << 7));
462 }
463 }
464
465 val = write_arb_data[NUM_WR_Q-1][w_order].add;
466 val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
467 val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
468 REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
469
470 val = read_arb_data[NUM_RD_Q-1][r_order].add;
471 val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
472 val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
473 REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
474
475 REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
476 REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
477 REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
478 REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
479
480 if (r_order == MAX_RD_ORD)
481 REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
482
483 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
484
485 if (CHIP_IS_E1H(bp)) {
486 /* MPS w_order optimal TH presently TH
487 * 128 0 0 2
488 * 256 1 1 3
489 * >=512 2 2 3
490 */
491 val = ((w_order == 0) ? 2 : 3);
492 REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
493 REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
494 REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
495 REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
496 REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
497 REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
498 REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
499 REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
500 REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
501 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
502 REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
503 }
504}
505
506#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
new file mode 100644
index 000000000000..0383e3066313
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -0,0 +1,6735 @@
1/* Copyright 2008-2009 Broadcom Corporation
2 *
3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you
5 * under the terms of the GNU General Public License version 2, available
6 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
7 *
8 * Notwithstanding the above, under no circumstances may you combine this
9 * software in any way with any other Broadcom software provided under a
10 * license other than the GPL, without Broadcom's express prior written
11 * consent.
12 *
13 * Written by Yaniv Rosner
14 *
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/pci.h>
22#include <linux/netdevice.h>
23#include <linux/delay.h>
24#include <linux/ethtool.h>
25#include <linux/mutex.h>
26
27#include "bnx2x.h"
28
29/********************************************************/
30#define ETH_HLEN 14
31#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/
32#define ETH_MIN_PACKET_SIZE 60
33#define ETH_MAX_PACKET_SIZE 1500
34#define ETH_MAX_JUMBO_PACKET_SIZE 9600
35#define MDIO_ACCESS_TIMEOUT 1000
36#define BMAC_CONTROL_RX_ENABLE 2
37
38/***********************************************************/
39/* Shortcut definitions */
40/***********************************************************/
41
42#define NIG_LATCH_BC_ENABLE_MI_INT 0
43
44#define NIG_STATUS_EMAC0_MI_INT \
45 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT
46#define NIG_STATUS_XGXS0_LINK10G \
47 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
48#define NIG_STATUS_XGXS0_LINK_STATUS \
49 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
50#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
51 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
52#define NIG_STATUS_SERDES0_LINK_STATUS \
53 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
54#define NIG_MASK_MI_INT \
55 NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
56#define NIG_MASK_XGXS0_LINK10G \
57 NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
58#define NIG_MASK_XGXS0_LINK_STATUS \
59 NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
60#define NIG_MASK_SERDES0_LINK_STATUS \
61 NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
62
63#define MDIO_AN_CL73_OR_37_COMPLETE \
64 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
65 MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
66
67#define XGXS_RESET_BITS \
68 (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \
69 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \
70 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \
71 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
72 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
73
74#define SERDES_RESET_BITS \
75 (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
76 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \
77 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \
78 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
79
80#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
81#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
82#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
83#define AUTONEG_PARALLEL \
84 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
85#define AUTONEG_SGMII_FIBER_AUTODET \
86 SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
87#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
88
89#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
90 MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
91#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
92 MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
93#define GP_STATUS_SPEED_MASK \
94 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
95#define GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
96#define GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
97#define GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
98#define GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
99#define GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
100#define GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
101#define GP_STATUS_10G_HIG \
102 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
103#define GP_STATUS_10G_CX4 \
104 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
105#define GP_STATUS_12G_HIG \
106 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG
107#define GP_STATUS_12_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G
108#define GP_STATUS_13G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G
109#define GP_STATUS_15G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G
110#define GP_STATUS_16G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G
111#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
112#define GP_STATUS_10G_KX4 \
113 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
114
115#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
116#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
117#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
118#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
119#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
120#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
121#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
122#define LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
123#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
124#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
125#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
126#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
127#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
128#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
129#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
130#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
131#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
132#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
133#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
134#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
135#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
136#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
137#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
138
139#define PHY_XGXS_FLAG 0x1
140#define PHY_SGMII_FLAG 0x2
141#define PHY_SERDES_FLAG 0x4
142
143/* SFP+ module EEPROM field definitions */
144#define SFP_EEPROM_CON_TYPE_ADDR 0x2
145 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
146 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
147
148
149#define SFP_EEPROM_COMP_CODE_ADDR 0x3
150 #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4)
151 #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5)
152 #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6)
153
154#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
155 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
156 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
157
158#define SFP_EEPROM_OPTIONS_ADDR 0x40
159 #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
160#define SFP_EEPROM_OPTIONS_SIZE 2
161
162#define EDC_MODE_LINEAR 0x0022
163#define EDC_MODE_LIMITING 0x0044
164#define EDC_MODE_PASSIVE_DAC 0x0055
165
166
167
168/**********************************************************/
169/* INTERFACE */
170/**********************************************************/
171#define CL45_WR_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \
172 bnx2x_cl45_write(_bp, _port, 0, _phy_addr, \
173 DEFAULT_PHY_DEV_ADDR, \
174 (_bank + (_addr & 0xf)), \
175 _val)
176
177#define CL45_RD_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \
178 bnx2x_cl45_read(_bp, _port, 0, _phy_addr, \
179 DEFAULT_PHY_DEV_ADDR, \
180 (_bank + (_addr & 0xf)), \
181 _val)
182
183static void bnx2x_set_serdes_access(struct link_params *params)
184{
185 struct bnx2x *bp = params->bp;
186 u32 emac_base = (params->port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
187
188 /* Set Clause 22 */
189 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 1);
190 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
191 udelay(500);
192 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
193 udelay(500);
194 /* Set Clause 45 */
195 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 0);
196}
197static void bnx2x_set_phy_mdio(struct link_params *params, u8 phy_flags)
198{
199 struct bnx2x *bp = params->bp;
200
201 if (phy_flags & PHY_XGXS_FLAG) {
202 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
203 params->port*0x18, 0);
204 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
205 DEFAULT_PHY_DEV_ADDR);
206 } else {
207 bnx2x_set_serdes_access(params);
208
209 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
210 params->port*0x10,
211 DEFAULT_PHY_DEV_ADDR);
212 }
213}
214
215static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
216{
217 u32 val = REG_RD(bp, reg);
218
219 val |= bits;
220 REG_WR(bp, reg, val);
221 return val;
222}
223
224static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
225{
226 u32 val = REG_RD(bp, reg);
227
228 val &= ~bits;
229 REG_WR(bp, reg, val);
230 return val;
231}
232
233static void bnx2x_emac_init(struct link_params *params,
234 struct link_vars *vars)
235{
236 /* reset and unreset the emac core */
237 struct bnx2x *bp = params->bp;
238 u8 port = params->port;
239 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
240 u32 val;
241 u16 timeout;
242
243 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
244 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
245 udelay(5);
246 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
247 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
248
249 /* init emac - use read-modify-write */
250 /* self clear reset */
251 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
252 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
253
254 timeout = 200;
255 do {
256 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
257 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
258 if (!timeout) {
259 DP(NETIF_MSG_LINK, "EMAC timeout!\n");
260 return;
261 }
262 timeout--;
263 } while (val & EMAC_MODE_RESET);
264
265 /* Set mac address */
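	/* e.g. 00:11:22:33:44:55 is programmed as 0x0011 (MAC_MATCH)
	 * and 0x22334455 (MAC_MATCH + 4) */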
266 val = ((params->mac_addr[0] << 8) |
267 params->mac_addr[1]);
268 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
269
270 val = ((params->mac_addr[2] << 24) |
271 (params->mac_addr[3] << 16) |
272 (params->mac_addr[4] << 8) |
273 params->mac_addr[5]);
274 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
275}
276
277static u8 bnx2x_emac_enable(struct link_params *params,
278 struct link_vars *vars, u8 lb)
279{
280 struct bnx2x *bp = params->bp;
281 u8 port = params->port;
282 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
283 u32 val;
284
285 DP(NETIF_MSG_LINK, "enabling EMAC\n");
286
287 /* enable emac and not bmac */
288 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
289
290	/* for Palladium (emulation) */
291 if (CHIP_REV_IS_EMUL(bp)) {
292 /* Use lane 1 (of lanes 0-3) */
293 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
294 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
295 port*4, 1);
296 }
297 /* for fpga */
298 else
299
300 if (CHIP_REV_IS_FPGA(bp)) {
301 /* Use lane 1 (of lanes 0-3) */
302 DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
303
304 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
305 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
306 0);
307 } else
308 /* ASIC */
309 if (vars->phy_flags & PHY_XGXS_FLAG) {
310 u32 ser_lane = ((params->lane_config &
311 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
312 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
313
314 DP(NETIF_MSG_LINK, "XGXS\n");
315 /* select the master lanes (out of 0-3) */
316 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 +
317 port*4, ser_lane);
318 /* select XGXS */
319 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
320 port*4, 1);
321
322 } else { /* SerDes */
323 DP(NETIF_MSG_LINK, "SerDes\n");
324 /* select SerDes */
325 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
326 port*4, 0);
327 }
328
329 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
330 EMAC_RX_MODE_RESET);
331 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
332 EMAC_TX_MODE_RESET);
333
334 if (CHIP_REV_IS_SLOW(bp)) {
335 /* config GMII mode */
336 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
337 EMAC_WR(bp, EMAC_REG_EMAC_MODE,
338 (val | EMAC_MODE_PORT_GMII));
339 } else { /* ASIC */
340 /* pause enable/disable */
341 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
342 EMAC_RX_MODE_FLOW_EN);
343 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
344 bnx2x_bits_en(bp, emac_base +
345 EMAC_REG_EMAC_RX_MODE,
346 EMAC_RX_MODE_FLOW_EN);
347
348 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
349 (EMAC_TX_MODE_EXT_PAUSE_EN |
350 EMAC_TX_MODE_FLOW_EN));
351 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
352 bnx2x_bits_en(bp, emac_base +
353 EMAC_REG_EMAC_TX_MODE,
354 (EMAC_TX_MODE_EXT_PAUSE_EN |
355 EMAC_TX_MODE_FLOW_EN));
356 }
357
358 /* KEEP_VLAN_TAG, promiscuous */
359 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
360 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
361 EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
362
363 /* Set Loopback */
364 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
365 if (lb)
366 val |= 0x810;
367 else
368 val &= ~0x810;
369 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
370
371 /* enable emac */
372 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
373
374 /* enable emac for jumbo packets */
375 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
376 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
377 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
378
379 /* strip CRC */
380 REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
381
382 /* disable the NIG in/out to the bmac */
383 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
384 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
385 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
386
387 /* enable the NIG in/out to the emac */
388 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
389 val = 0;
390 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
391 val = 1;
392
393 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
394 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
395
396 if (CHIP_REV_IS_EMUL(bp)) {
397 /* take the BigMac out of reset */
398 REG_WR(bp,
399 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
400 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
401
402 /* enable access for bmac registers */
403 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
404 } else
405 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
406
407 vars->mac_type = MAC_TYPE_EMAC;
408 return 0;
409}
410
411
412
413static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
414 u8 is_lb)
415{
416 struct bnx2x *bp = params->bp;
417 u8 port = params->port;
418 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
419 NIG_REG_INGRESS_BMAC0_MEM;
420 u32 wb_data[2];
421 u32 val;
422
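	/* BigMAC registers are accessed as two 32-bit words through DMAE,
	 * hence the wb_data[2] buffer used for every write below */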
423 DP(NETIF_MSG_LINK, "Enabling BigMAC\n");
424 /* reset and unreset the BigMac */
425 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
426 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
427 msleep(1);
428
429 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
430 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
431
432 /* enable access for bmac registers */
433 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
434
435 /* XGXS control */
436 wb_data[0] = 0x3c;
437 wb_data[1] = 0;
438 REG_WR_DMAE(bp, bmac_addr +
439 BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
440 wb_data, 2);
441
442 /* tx MAC SA */
443 wb_data[0] = ((params->mac_addr[2] << 24) |
444 (params->mac_addr[3] << 16) |
445 (params->mac_addr[4] << 8) |
446 params->mac_addr[5]);
447 wb_data[1] = ((params->mac_addr[0] << 8) |
448 params->mac_addr[1]);
449 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
450 wb_data, 2);
451
452 /* tx control */
453 val = 0xc0;
454 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
455 val |= 0x800000;
456 wb_data[0] = val;
457 wb_data[1] = 0;
458 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL,
459 wb_data, 2);
460
461 /* mac control */
462 val = 0x3;
463 if (is_lb) {
464 val |= 0x4;
465 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
466 }
467 wb_data[0] = val;
468 wb_data[1] = 0;
469 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
470 wb_data, 2);
471
472 /* set rx mtu */
473 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
474 wb_data[1] = 0;
475 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE,
476 wb_data, 2);
477
478 /* rx control set to don't strip crc */
479 val = 0x14;
480 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
481 val |= 0x20;
482 wb_data[0] = val;
483 wb_data[1] = 0;
484 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL,
485 wb_data, 2);
486
487 /* set tx mtu */
488 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
489 wb_data[1] = 0;
490 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE,
491 wb_data, 2);
492
493 /* set cnt max size */
494 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
495 wb_data[1] = 0;
496 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
497 wb_data, 2);
498
499 /* configure safc */
500 wb_data[0] = 0x1000200;
501 wb_data[1] = 0;
502 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
503 wb_data, 2);
504 /* fix for emulation */
505 if (CHIP_REV_IS_EMUL(bp)) {
506 wb_data[0] = 0xf000;
507 wb_data[1] = 0;
508 REG_WR_DMAE(bp,
509 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
510 wb_data, 2);
511 }
512
513 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
514 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
515 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
516 val = 0;
517 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
518 val = 1;
519 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
520 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
521 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
522 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
523 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
524 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
525
526 vars->mac_type = MAC_TYPE_BMAC;
527 return 0;
528}
529
530static void bnx2x_phy_deassert(struct link_params *params, u8 phy_flags)
531{
532 struct bnx2x *bp = params->bp;
533 u32 val;
534
535 if (phy_flags & PHY_XGXS_FLAG) {
536 DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:XGXS\n");
537 val = XGXS_RESET_BITS;
538
539 } else { /* SerDes */
540 DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:SerDes\n");
541 val = SERDES_RESET_BITS;
542 }
543
544 val = val << (params->port*16);
545
546 /* reset and unreset the SerDes/XGXS */
547 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
548 val);
549 udelay(500);
550 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET,
551 val);
552 bnx2x_set_phy_mdio(params, phy_flags);
553}
554
555void bnx2x_link_status_update(struct link_params *params,
556 struct link_vars *vars)
557{
558 struct bnx2x *bp = params->bp;
559 u8 link_10g;
560 u8 port = params->port;
561
562 if (params->switch_cfg == SWITCH_CFG_1G)
563 vars->phy_flags = PHY_SERDES_FLAG;
564 else
565 vars->phy_flags = PHY_XGXS_FLAG;
566 vars->link_status = REG_RD(bp, params->shmem_base +
567 offsetof(struct shmem_region,
568 port_mb[port].link_status));
569
570 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
571
572 if (vars->link_up) {
573 DP(NETIF_MSG_LINK, "phy link up\n");
574
575 vars->phy_link_up = 1;
576 vars->duplex = DUPLEX_FULL;
577 switch (vars->link_status &
578 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
579 case LINK_10THD:
580 vars->duplex = DUPLEX_HALF;
581 /* fall thru */
582 case LINK_10TFD:
583 vars->line_speed = SPEED_10;
584 break;
585
586 case LINK_100TXHD:
587 vars->duplex = DUPLEX_HALF;
588 /* fall thru */
589 case LINK_100T4:
590 case LINK_100TXFD:
591 vars->line_speed = SPEED_100;
592 break;
593
594 case LINK_1000THD:
595 vars->duplex = DUPLEX_HALF;
596 /* fall thru */
597 case LINK_1000TFD:
598 vars->line_speed = SPEED_1000;
599 break;
600
601 case LINK_2500THD:
602 vars->duplex = DUPLEX_HALF;
603 /* fall thru */
604 case LINK_2500TFD:
605 vars->line_speed = SPEED_2500;
606 break;
607
608 case LINK_10GTFD:
609 vars->line_speed = SPEED_10000;
610 break;
611
612 case LINK_12GTFD:
613 vars->line_speed = SPEED_12000;
614 break;
615
616 case LINK_12_5GTFD:
617 vars->line_speed = SPEED_12500;
618 break;
619
620 case LINK_13GTFD:
621 vars->line_speed = SPEED_13000;
622 break;
623
624 case LINK_15GTFD:
625 vars->line_speed = SPEED_15000;
626 break;
627
628 case LINK_16GTFD:
629 vars->line_speed = SPEED_16000;
630 break;
631
632 default:
633 break;
634 }
635
636 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
637 vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
638 else
639 vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_TX;
640
641 if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
642 vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
643 else
644 vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_RX;
645
646 if (vars->phy_flags & PHY_XGXS_FLAG) {
647 if (vars->line_speed &&
648 ((vars->line_speed == SPEED_10) ||
649 (vars->line_speed == SPEED_100))) {
650 vars->phy_flags |= PHY_SGMII_FLAG;
651 } else {
652 vars->phy_flags &= ~PHY_SGMII_FLAG;
653 }
654 }
655
656 /* anything 10 and over uses the bmac */
657 link_10g = ((vars->line_speed == SPEED_10000) ||
658 (vars->line_speed == SPEED_12000) ||
659 (vars->line_speed == SPEED_12500) ||
660 (vars->line_speed == SPEED_13000) ||
661 (vars->line_speed == SPEED_15000) ||
662 (vars->line_speed == SPEED_16000));
663 if (link_10g)
664 vars->mac_type = MAC_TYPE_BMAC;
665 else
666 vars->mac_type = MAC_TYPE_EMAC;
667
668 } else { /* link down */
669 DP(NETIF_MSG_LINK, "phy link down\n");
670
671 vars->phy_link_up = 0;
672
673 vars->line_speed = 0;
674 vars->duplex = DUPLEX_FULL;
675 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
676
677 /* indicate no mac active */
678 vars->mac_type = MAC_TYPE_NONE;
679 }
680
681 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n",
682 vars->link_status, vars->phy_link_up);
683 DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
684 vars->line_speed, vars->duplex, vars->flow_ctrl);
685}
686
687static void bnx2x_update_mng(struct link_params *params, u32 link_status)
688{
689 struct bnx2x *bp = params->bp;
690
691 REG_WR(bp, params->shmem_base +
692 offsetof(struct shmem_region,
693 port_mb[params->port].link_status),
694 link_status);
695}
696
697static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
698{
699 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
700 NIG_REG_INGRESS_BMAC0_MEM;
701 u32 wb_data[2];
702 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
703
704 /* Only if the bmac is out of reset */
705 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
706 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
707 nig_bmac_enable) {
708
709 /* Clear Rx Enable bit in BMAC_CONTROL register */
710 REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
711 wb_data, 2);
712 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
713 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
714 wb_data, 2);
715
716 msleep(1);
717 }
718}
719
720static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
721 u32 line_speed)
722{
723 struct bnx2x *bp = params->bp;
724 u8 port = params->port;
725 u32 init_crd, crd;
726 u32 count = 1000;
727
728 /* disable port */
729 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
730
731 /* wait for init credit */
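	/* poll (up to 1000 iterations of 5 ms) until the credit counter
	 * drains back to its init value */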
732 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
733 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
734 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
735
736 while ((init_crd != crd) && count) {
737 msleep(5);
738
739 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
740 count--;
741 }
742 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
743 if (init_crd != crd) {
744 DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n",
745 init_crd, crd);
746 return -EINVAL;
747 }
748
749 if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
750 line_speed == SPEED_10 ||
751 line_speed == SPEED_100 ||
752 line_speed == SPEED_1000 ||
753 line_speed == SPEED_2500) {
754 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
755 /* update threshold */
756 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
757 /* update init credit */
758 init_crd = 778; /* (800-18-4) */
759
760 } else {
761 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
762 ETH_OVREHEAD)/16;
763 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
764 /* update threshold */
765 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
766 /* update init credit */
767 switch (line_speed) {
768 case SPEED_10000:
769 init_crd = thresh + 553 - 22;
770 break;
771
772 case SPEED_12000:
773 init_crd = thresh + 664 - 22;
774 break;
775
776 case SPEED_13000:
777 init_crd = thresh + 742 - 22;
778 break;
779
780 case SPEED_16000:
781 init_crd = thresh + 778 - 22;
782 break;
783 default:
784 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
785 line_speed);
786 return -EINVAL;
787 }
788 }
789 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
790 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
791 line_speed, init_crd);
792
793 /* probe the credit changes */
794 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
795 msleep(5);
796 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
797
798 /* enable port */
799 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
800 return 0;
801}
802
803static u32 bnx2x_get_emac_base(struct bnx2x *bp, u32 ext_phy_type, u8 port)
804{
805 u32 emac_base;
806
807 switch (ext_phy_type) {
808 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
809 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
810 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
811 /* All MDC/MDIO is directed through single EMAC */
812 if (REG_RD(bp, NIG_REG_PORT_SWAP))
813 emac_base = GRCBASE_EMAC0;
814 else
815 emac_base = GRCBASE_EMAC1;
816 break;
817 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
818 emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
819 break;
820 default:
821 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
822 break;
823 }
824 return emac_base;
825
826}
827
828u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
829 u8 phy_addr, u8 devad, u16 reg, u16 val)
830{
831 u32 tmp, saved_mode;
832 u8 i, rc = 0;
833 u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port);
834
835 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
836 * (a value of 49==0x31) and make sure that the AUTO poll is off
837 */
838
839 saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
840 tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL |
841 EMAC_MDIO_MODE_CLOCK_CNT);
842 tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
843 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
844 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
845 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
846 udelay(40);
847
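	/* Clause 45 access is a two-step sequence: an ADDRESS command
	 * first latches the register number, then a WRITE command carries
	 * the data; each step polls START_BUSY until the controller
	 * clears it */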
848 /* address */
849
850 tmp = ((phy_addr << 21) | (devad << 16) | reg |
851 EMAC_MDIO_COMM_COMMAND_ADDRESS |
852 EMAC_MDIO_COMM_START_BUSY);
853 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
854
855 for (i = 0; i < 50; i++) {
856 udelay(10);
857
858 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
859 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
860 udelay(5);
861 break;
862 }
863 }
864 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
865 DP(NETIF_MSG_LINK, "write phy register failed\n");
866 rc = -EFAULT;
867 } else {
868 /* data */
869 tmp = ((phy_addr << 21) | (devad << 16) | val |
870 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
871 EMAC_MDIO_COMM_START_BUSY);
872 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
873
874 for (i = 0; i < 50; i++) {
875 udelay(10);
876
877 tmp = REG_RD(bp, mdio_ctrl +
878 EMAC_REG_EMAC_MDIO_COMM);
879 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
880 udelay(5);
881 break;
882 }
883 }
884 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
885 DP(NETIF_MSG_LINK, "write phy register failed\n");
886 rc = -EFAULT;
887 }
888 }
889
890 /* Restore the saved mode */
891 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
892
893 return rc;
894}
895
896u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
897 u8 phy_addr, u8 devad, u16 reg, u16 *ret_val)
898{
899 u32 val, saved_mode;
900 u16 i;
901 u8 rc = 0;
902
903 u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port);
904 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
905 * (a value of 49==0x31) and make sure that the AUTO poll is off
906 */
907
908 saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
909 val = saved_mode & ((EMAC_MDIO_MODE_AUTO_POLL |
910 EMAC_MDIO_MODE_CLOCK_CNT));
911 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
912 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
913 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
914 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
915 udelay(40);
916
917 /* address */
918 val = ((phy_addr << 21) | (devad << 16) | reg |
919 EMAC_MDIO_COMM_COMMAND_ADDRESS |
920 EMAC_MDIO_COMM_START_BUSY);
921 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
922
923 for (i = 0; i < 50; i++) {
924 udelay(10);
925
926 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
927 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
928 udelay(5);
929 break;
930 }
931 }
932 if (val & EMAC_MDIO_COMM_START_BUSY) {
933 DP(NETIF_MSG_LINK, "read phy register failed\n");
934
935 *ret_val = 0;
936 rc = -EFAULT;
937
938 } else {
939 /* data */
940 val = ((phy_addr << 21) | (devad << 16) |
941 EMAC_MDIO_COMM_COMMAND_READ_45 |
942 EMAC_MDIO_COMM_START_BUSY);
943 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
944
945 for (i = 0; i < 50; i++) {
946 udelay(10);
947
948 val = REG_RD(bp, mdio_ctrl +
949 EMAC_REG_EMAC_MDIO_COMM);
950 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
951 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
952 break;
953 }
954 }
955 if (val & EMAC_MDIO_COMM_START_BUSY) {
956 DP(NETIF_MSG_LINK, "read phy register failed\n");
957
958 *ret_val = 0;
959 rc = -EFAULT;
960 }
961 }
962
963 /* Restore the saved mode */
964 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
965
966 return rc;
967}
968
969static void bnx2x_set_aer_mmd(struct link_params *params,
970 struct link_vars *vars)
971{
972 struct bnx2x *bp = params->bp;
973 u32 ser_lane;
974 u16 offset;
975
976 ser_lane = ((params->lane_config &
977 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
978 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
979
980 offset = (vars->phy_flags & PHY_XGXS_FLAG) ?
981 (params->phy_addr + ser_lane) : 0;
982
983 CL45_WR_OVER_CL22(bp, params->port,
984 params->phy_addr,
985 MDIO_REG_BANK_AER_BLOCK,
986 MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
987}
988
989static void bnx2x_set_master_ln(struct link_params *params)
990{
991 struct bnx2x *bp = params->bp;
992 u16 new_master_ln, ser_lane;
993 ser_lane = ((params->lane_config &
994 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
995 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
996
997 /* set the master_ln for AN */
998 CL45_RD_OVER_CL22(bp, params->port,
999 params->phy_addr,
1000 MDIO_REG_BANK_XGXS_BLOCK2,
1001 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1002 &new_master_ln);
1003
1004 CL45_WR_OVER_CL22(bp, params->port,
1005 params->phy_addr,
1006 MDIO_REG_BANK_XGXS_BLOCK2 ,
1007 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1008 (new_master_ln | ser_lane));
1009}
1010
1011static u8 bnx2x_reset_unicore(struct link_params *params)
1012{
1013 struct bnx2x *bp = params->bp;
1014 u16 mii_control;
1015 u16 i;
1016
1017 CL45_RD_OVER_CL22(bp, params->port,
1018 params->phy_addr,
1019 MDIO_REG_BANK_COMBO_IEEE0,
1020 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1021
1022 /* reset the unicore */
1023 CL45_WR_OVER_CL22(bp, params->port,
1024 params->phy_addr,
1025 MDIO_REG_BANK_COMBO_IEEE0,
1026 MDIO_COMBO_IEEE0_MII_CONTROL,
1027 (mii_control |
1028 MDIO_COMBO_IEEO_MII_CONTROL_RESET));
1029 if (params->switch_cfg == SWITCH_CFG_1G)
1030 bnx2x_set_serdes_access(params);
1031
1032 /* wait for the reset to self clear */
1033 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
1034 udelay(5);
1035
1036 /* the reset erased the previous bank value */
1037 CL45_RD_OVER_CL22(bp, params->port,
1038 params->phy_addr,
1039 MDIO_REG_BANK_COMBO_IEEE0,
1040 MDIO_COMBO_IEEE0_MII_CONTROL,
1041 &mii_control);
1042
1043 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
1044 udelay(5);
1045 return 0;
1046 }
1047 }
1048
1049 DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
1050 return -EINVAL;
1051
1052}
1053
1054static void bnx2x_set_swap_lanes(struct link_params *params)
1055{
1056 struct bnx2x *bp = params->bp;
1057	/* Each two bits represents a lane number:
1058	   no swap is lanes 0,1,2,3 => 0x1b, so the swap need not be enabled */
1059 u16 ser_lane, rx_lane_swap, tx_lane_swap;
1060
1061 ser_lane = ((params->lane_config &
1062 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
1063 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
1064 rx_lane_swap = ((params->lane_config &
1065 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
1066 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
1067 tx_lane_swap = ((params->lane_config &
1068 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
1069 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
1070
1071 if (rx_lane_swap != 0x1b) {
1072 CL45_WR_OVER_CL22(bp, params->port,
1073 params->phy_addr,
1074 MDIO_REG_BANK_XGXS_BLOCK2,
1075 MDIO_XGXS_BLOCK2_RX_LN_SWAP,
1076 (rx_lane_swap |
1077 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
1078 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
1079 } else {
1080 CL45_WR_OVER_CL22(bp, params->port,
1081 params->phy_addr,
1082 MDIO_REG_BANK_XGXS_BLOCK2,
1083 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
1084 }
1085
1086 if (tx_lane_swap != 0x1b) {
1087 CL45_WR_OVER_CL22(bp, params->port,
1088 params->phy_addr,
1089 MDIO_REG_BANK_XGXS_BLOCK2,
1090 MDIO_XGXS_BLOCK2_TX_LN_SWAP,
1091 (tx_lane_swap |
1092 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
1093 } else {
1094 CL45_WR_OVER_CL22(bp, params->port,
1095 params->phy_addr,
1096 MDIO_REG_BANK_XGXS_BLOCK2,
1097 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
1098 }
1099}
1100
1101static void bnx2x_set_parallel_detection(struct link_params *params,
1102 u8 phy_flags)
1103{
1104 struct bnx2x *bp = params->bp;
1105 u16 control2;
1106
1107 CL45_RD_OVER_CL22(bp, params->port,
1108 params->phy_addr,
1109 MDIO_REG_BANK_SERDES_DIGITAL,
1110 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1111 &control2);
1112 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1113 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1114 else
1115 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1116 DP(NETIF_MSG_LINK, "params->speed_cap_mask = 0x%x, control2 = 0x%x\n",
1117 params->speed_cap_mask, control2);
1118 CL45_WR_OVER_CL22(bp, params->port,
1119 params->phy_addr,
1120 MDIO_REG_BANK_SERDES_DIGITAL,
1121 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1122 control2);
1123
1124 if ((phy_flags & PHY_XGXS_FLAG) &&
1125 (params->speed_cap_mask &
1126 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
1127 DP(NETIF_MSG_LINK, "XGXS\n");
1128
1129 CL45_WR_OVER_CL22(bp, params->port,
1130 params->phy_addr,
1131 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1132 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
1133 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
1134
1135 CL45_RD_OVER_CL22(bp, params->port,
1136 params->phy_addr,
1137 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1138 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1139 &control2);
1140
1141
1142 control2 |=
1143 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
1144
1145 CL45_WR_OVER_CL22(bp, params->port,
1146 params->phy_addr,
1147 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1148 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1149 control2);
1150
1151 /* Disable parallel detection of HiG */
1152 CL45_WR_OVER_CL22(bp, params->port,
1153 params->phy_addr,
1154 MDIO_REG_BANK_XGXS_BLOCK2,
1155 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
1156 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
1157 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
1158 }
1159}
1160
1161static void bnx2x_set_autoneg(struct link_params *params,
1162 struct link_vars *vars,
1163 u8 enable_cl73)
1164{
1165 struct bnx2x *bp = params->bp;
1166 u16 reg_val;
1167
1168 /* CL37 Autoneg */
1169
1170 CL45_RD_OVER_CL22(bp, params->port,
1171 params->phy_addr,
1172 MDIO_REG_BANK_COMBO_IEEE0,
1173 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1174
1175 /* CL37 Autoneg Enabled */
1176 if (vars->line_speed == SPEED_AUTO_NEG)
1177 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
1178 else /* CL37 Autoneg Disabled */
1179 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1180 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
1181
1182 CL45_WR_OVER_CL22(bp, params->port,
1183 params->phy_addr,
1184 MDIO_REG_BANK_COMBO_IEEE0,
1185 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1186
1187 /* Enable/Disable Autodetection */
1188
1189 CL45_RD_OVER_CL22(bp, params->port,
1190 params->phy_addr,
1191 MDIO_REG_BANK_SERDES_DIGITAL,
1192 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
1193 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
1194 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
1195 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
1196 if (vars->line_speed == SPEED_AUTO_NEG)
1197 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1198 else
1199 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1200
1201 CL45_WR_OVER_CL22(bp, params->port,
1202 params->phy_addr,
1203 MDIO_REG_BANK_SERDES_DIGITAL,
1204 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
1205
1206 /* Enable TetonII and BAM autoneg */
1207 CL45_RD_OVER_CL22(bp, params->port,
1208 params->phy_addr,
1209 MDIO_REG_BANK_BAM_NEXT_PAGE,
1210 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1211 &reg_val);
1212 if (vars->line_speed == SPEED_AUTO_NEG) {
1213 /* Enable BAM aneg Mode and TetonII aneg Mode */
1214 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1215 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
1216 } else {
1217 /* TetonII and BAM Autoneg Disabled */
1218 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1219 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
1220 }
1221 CL45_WR_OVER_CL22(bp, params->port,
1222 params->phy_addr,
1223 MDIO_REG_BANK_BAM_NEXT_PAGE,
1224 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1225 reg_val);
1226
1227 if (enable_cl73) {
1228 /* Enable Cl73 FSM status bits */
1229 CL45_WR_OVER_CL22(bp, params->port,
1230 params->phy_addr,
1231 MDIO_REG_BANK_CL73_USERB0,
1232 MDIO_CL73_USERB0_CL73_UCTRL,
1233 0xe);
1234
1235 /* Enable BAM Station Manager*/
1236 CL45_WR_OVER_CL22(bp, params->port,
1237 params->phy_addr,
1238 MDIO_REG_BANK_CL73_USERB0,
1239 MDIO_CL73_USERB0_CL73_BAM_CTRL1,
1240 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
1241 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
1242 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
1243
1244 /* Advertise CL73 link speeds */
1245 CL45_RD_OVER_CL22(bp, params->port,
1246 params->phy_addr,
1247 MDIO_REG_BANK_CL73_IEEEB1,
1248 MDIO_CL73_IEEEB1_AN_ADV2,
1249 &reg_val);
1250 if (params->speed_cap_mask &
1251 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
1252 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
1253 if (params->speed_cap_mask &
1254 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1255 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
1256
1257 CL45_WR_OVER_CL22(bp, params->port,
1258 params->phy_addr,
1259 MDIO_REG_BANK_CL73_IEEEB1,
1260 MDIO_CL73_IEEEB1_AN_ADV2,
1261 reg_val);
1262
1263 /* CL73 Autoneg Enabled */
1264 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
1265
1266 } else /* CL73 Autoneg Disabled */
1267 reg_val = 0;
1268
1269 CL45_WR_OVER_CL22(bp, params->port,
1270 params->phy_addr,
1271 MDIO_REG_BANK_CL73_IEEEB0,
1272 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
1273}
1274
1275/* program SerDes, forced speed */
1276static void bnx2x_program_serdes(struct link_params *params,
1277 struct link_vars *vars)
1278{
1279 struct bnx2x *bp = params->bp;
1280 u16 reg_val;
1281
1282 /* program duplex, disable autoneg and sgmii*/
1283 CL45_RD_OVER_CL22(bp, params->port,
1284 params->phy_addr,
1285 MDIO_REG_BANK_COMBO_IEEE0,
1286 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1287 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
1288 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1289 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
1290 if (params->req_duplex == DUPLEX_FULL)
1291 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
1292 CL45_WR_OVER_CL22(bp, params->port,
1293 params->phy_addr,
1294 MDIO_REG_BANK_COMBO_IEEE0,
1295 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1296
1297 /* program speed
1298 - needed only if the speed is greater than 1G (2.5G or 10G) */
1299 CL45_RD_OVER_CL22(bp, params->port,
1300 params->phy_addr,
1301 MDIO_REG_BANK_SERDES_DIGITAL,
1302 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
1303 /* clearing the speed value before setting the right speed */
1304 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
1305
1306 reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
1307 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
1308
1309 if (!((vars->line_speed == SPEED_1000) ||
1310 (vars->line_speed == SPEED_100) ||
1311 (vars->line_speed == SPEED_10))) {
1312
1313 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
1314 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
1315 if (vars->line_speed == SPEED_10000)
1316 reg_val |=
1317 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
1318 if (vars->line_speed == SPEED_13000)
1319 reg_val |=
1320 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
1321 }
1322
1323 CL45_WR_OVER_CL22(bp, params->port,
1324 params->phy_addr,
1325 MDIO_REG_BANK_SERDES_DIGITAL,
1326 MDIO_SERDES_DIGITAL_MISC1, reg_val);
1327
1328}
1329
1330static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
1331{
1332 struct bnx2x *bp = params->bp;
1333 u16 val = 0;
1334
1335 /* configure the 48 bits for BAM AN */
1336
1337 /* set extended capabilities */
1338 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
1339 val |= MDIO_OVER_1G_UP1_2_5G;
1340 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
1341 val |= MDIO_OVER_1G_UP1_10G;
1342 CL45_WR_OVER_CL22(bp, params->port,
1343 params->phy_addr,
1344 MDIO_REG_BANK_OVER_1G,
1345 MDIO_OVER_1G_UP1, val);
1346
1347 CL45_WR_OVER_CL22(bp, params->port,
1348 params->phy_addr,
1349 MDIO_REG_BANK_OVER_1G,
1350 MDIO_OVER_1G_UP3, 0x400);
1351}
1352
1353static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
1354{
1355 struct bnx2x *bp = params->bp;
1356 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1357 /* resolve pause mode and advertisement
1358 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
1359
1360 switch (params->req_flow_ctrl) {
1361 case BNX2X_FLOW_CTRL_AUTO:
1362 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
1363 *ieee_fc |=
1364 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
1365 } else {
1366 *ieee_fc |=
1367 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1368 }
1369 break;
1370 case BNX2X_FLOW_CTRL_TX:
1371 *ieee_fc |=
1372 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1373 break;
1374
1375 case BNX2X_FLOW_CTRL_RX:
1376 case BNX2X_FLOW_CTRL_BOTH:
1377 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
1378 break;
1379
1380 case BNX2X_FLOW_CTRL_NONE:
1381 default:
1382 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
1383 break;
1384 }
1385 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
1386}
1387
1388static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
1389 u16 ieee_fc)
1390{
1391 struct bnx2x *bp = params->bp;
1392 u16 val;
1393 /* for AN, we are always publishing full duplex */
1394
1395 CL45_WR_OVER_CL22(bp, params->port,
1396 params->phy_addr,
1397 MDIO_REG_BANK_COMBO_IEEE0,
1398 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
1399 CL45_RD_OVER_CL22(bp, params->port,
1400 params->phy_addr,
1401 MDIO_REG_BANK_CL73_IEEEB1,
1402 MDIO_CL73_IEEEB1_AN_ADV1, &val);
1403 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
1404 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
1405 CL45_WR_OVER_CL22(bp, params->port,
1406 params->phy_addr,
1407 MDIO_REG_BANK_CL73_IEEEB1,
1408 MDIO_CL73_IEEEB1_AN_ADV1, val);
1409}
1410
1411static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
1412{
1413 struct bnx2x *bp = params->bp;
1414 u16 mii_control;
1415
1416 DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n");
1417 /* Enable and restart BAM/CL37 aneg */
1418
1419 if (enable_cl73) {
1420 CL45_RD_OVER_CL22(bp, params->port,
1421 params->phy_addr,
1422 MDIO_REG_BANK_CL73_IEEEB0,
1423 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1424 &mii_control);
1425
1426 CL45_WR_OVER_CL22(bp, params->port,
1427 params->phy_addr,
1428 MDIO_REG_BANK_CL73_IEEEB0,
1429 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1430 (mii_control |
1431 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
1432 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
1433 } else {
1434
1435 CL45_RD_OVER_CL22(bp, params->port,
1436 params->phy_addr,
1437 MDIO_REG_BANK_COMBO_IEEE0,
1438 MDIO_COMBO_IEEE0_MII_CONTROL,
1439 &mii_control);
1440 DP(NETIF_MSG_LINK,
1441 "bnx2x_restart_autoneg mii_control before = 0x%x\n",
1442 mii_control);
1443 CL45_WR_OVER_CL22(bp, params->port,
1444 params->phy_addr,
1445 MDIO_REG_BANK_COMBO_IEEE0,
1446 MDIO_COMBO_IEEE0_MII_CONTROL,
1447 (mii_control |
1448 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1449 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
1450 }
1451}
1452
1453static void bnx2x_initialize_sgmii_process(struct link_params *params,
1454 struct link_vars *vars)
1455{
1456 struct bnx2x *bp = params->bp;
1457 u16 control1;
1458
1459 /* in SGMII mode, the unicore is always slave */
1460
1461 CL45_RD_OVER_CL22(bp, params->port,
1462 params->phy_addr,
1463 MDIO_REG_BANK_SERDES_DIGITAL,
1464 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
1465 &control1);
1466 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
1467 /* set sgmii mode (and not fiber) */
1468 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
1469 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
1470 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
1471 CL45_WR_OVER_CL22(bp, params->port,
1472 params->phy_addr,
1473 MDIO_REG_BANK_SERDES_DIGITAL,
1474 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
1475 control1);
1476
1477 /* if forced speed */
1478 if (!(vars->line_speed == SPEED_AUTO_NEG)) {
1479 /* set speed, disable autoneg */
1480 u16 mii_control;
1481
1482 CL45_RD_OVER_CL22(bp, params->port,
1483 params->phy_addr,
1484 MDIO_REG_BANK_COMBO_IEEE0,
1485 MDIO_COMBO_IEEE0_MII_CONTROL,
1486 &mii_control);
1487 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1488 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
1489 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
1490
1491 switch (vars->line_speed) {
1492 case SPEED_100:
1493 mii_control |=
1494 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
1495 break;
1496 case SPEED_1000:
1497 mii_control |=
1498 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
1499 break;
1500 case SPEED_10:
1501 /* there is nothing to set for 10M */
1502 break;
1503 default:
1504 /* invalid speed for SGMII */
1505 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
1506 vars->line_speed);
1507 break;
1508 }
1509
1510 /* setting the full duplex */
1511 if (params->req_duplex == DUPLEX_FULL)
1512 mii_control |=
1513 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
1514 CL45_WR_OVER_CL22(bp, params->port,
1515 params->phy_addr,
1516 MDIO_REG_BANK_COMBO_IEEE0,
1517 MDIO_COMBO_IEEE0_MII_CONTROL,
1518 mii_control);
1519
1520 } else { /* AN mode */
1521 /* enable and restart AN */
1522 bnx2x_restart_autoneg(params, 0);
1523 }
1524}
1525
1526
1527/*
1528 * link management
1529 */
1530
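/* pause_result packs the pause advertisement bits resolved below:
 * bit 3 = local (LD) ASYM_PAUSE, bit 2 = local PAUSE,
 * bit 1 = link partner (LP) ASYM_PAUSE, bit 0 = partner PAUSE.
 * For example 0xb (1011b) - local advertises asymmetric pause only,
 * partner advertises both - resolves to TX-only flow control below.
 */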
1531static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
1532{ /* LD LP */
1533 switch (pause_result) { /* ASYM P ASYM P */
1534 case 0xb: /* 1 0 1 1 */
1535 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
1536 break;
1537
1538 case 0xe: /* 1 1 1 0 */
1539 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
1540 break;
1541
1542 case 0x5: /* 0 1 0 1 */
1543 case 0x7: /* 0 1 1 1 */
1544 case 0xd: /* 1 1 0 1 */
1545 case 0xf: /* 1 1 1 1 */
1546 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
1547 break;
1548
1549 default:
1550 break;
1551 }
1552}
1553
1554static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
1555 struct link_vars *vars)
1556{
1557 struct bnx2x *bp = params->bp;
1558 u8 ext_phy_addr;
1559 u16 ld_pause; /* local */
1560 u16 lp_pause; /* link partner */
1561 u16 an_complete; /* AN complete */
1562 u16 pause_result;
1563 u8 ret = 0;
1564 u32 ext_phy_type;
1565 u8 port = params->port;
1566 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
1567 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
1568	/* read twice - the AN status register is latched, so the first
	   read may return stale status */
1569
1570 bnx2x_cl45_read(bp, port,
1571 ext_phy_type,
1572 ext_phy_addr,
1573 MDIO_AN_DEVAD,
1574 MDIO_AN_REG_STATUS, &an_complete);
1575 bnx2x_cl45_read(bp, port,
1576 ext_phy_type,
1577 ext_phy_addr,
1578 MDIO_AN_DEVAD,
1579 MDIO_AN_REG_STATUS, &an_complete);
1580
1581 if (an_complete & MDIO_AN_REG_STATUS_AN_COMPLETE) {
1582 ret = 1;
1583 bnx2x_cl45_read(bp, port,
1584 ext_phy_type,
1585 ext_phy_addr,
1586 MDIO_AN_DEVAD,
1587 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
1588 bnx2x_cl45_read(bp, port,
1589 ext_phy_type,
1590 ext_phy_addr,
1591 MDIO_AN_DEVAD,
1592 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
1593 pause_result = (ld_pause &
1594 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
1595 pause_result |= (lp_pause &
1596 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
1597 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
1598 pause_result);
1599 bnx2x_pause_resolve(vars, pause_result);
1600 if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
1601 ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
1602 bnx2x_cl45_read(bp, port,
1603 ext_phy_type,
1604 ext_phy_addr,
1605 MDIO_AN_DEVAD,
1606 MDIO_AN_REG_CL37_FC_LD, &ld_pause);
1607
1608 bnx2x_cl45_read(bp, port,
1609 ext_phy_type,
1610 ext_phy_addr,
1611 MDIO_AN_DEVAD,
1612 MDIO_AN_REG_CL37_FC_LP, &lp_pause);
1613 pause_result = (ld_pause &
1614 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
1615 pause_result |= (lp_pause &
1616 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
1617
1618 bnx2x_pause_resolve(vars, pause_result);
1619 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
1620 pause_result);
1621 }
1622 }
1623 return ret;
1624}
1625
1626static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
1627{
1628 struct bnx2x *bp = params->bp;
1629 u16 pd_10g, status2_1000x;
1630 CL45_RD_OVER_CL22(bp, params->port,
1631 params->phy_addr,
1632 MDIO_REG_BANK_SERDES_DIGITAL,
1633 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1634 &status2_1000x);
1635 CL45_RD_OVER_CL22(bp, params->port,
1636 params->phy_addr,
1637 MDIO_REG_BANK_SERDES_DIGITAL,
1638 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1639 &status2_1000x);
1640 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
1641 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
1642 params->port);
1643 return 1;
1644 }
1645
1646 CL45_RD_OVER_CL22(bp, params->port,
1647 params->phy_addr,
1648 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1649 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
1650 &pd_10g);
1651
1652 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
1653 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
1654 params->port);
1655 return 1;
1656 }
1657 return 0;
1658}
1659
1660static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1661 struct link_vars *vars,
1662 u32 gp_status)
1663{
1664 struct bnx2x *bp = params->bp;
1665 u16 ld_pause; /* local driver */
1666 u16 lp_pause; /* link partner */
1667 u16 pause_result;
1668
1669 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1670
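	/* The shift arithmetic below packs the local and link-partner
	 * PAUSE/ASM_DIR advertisement bits into the 4-bit LD/LP format
	 * consumed by bnx2x_pause_resolve() */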
1671 /* resolve from gp_status in case of AN complete and not sgmii */
1672 if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
1673 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1674 (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
1675 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1676 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
1677 if (bnx2x_direct_parallel_detect_used(params)) {
1678 vars->flow_ctrl = params->req_fc_auto_adv;
1679 return;
1680 }
1681 if ((gp_status &
1682 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
1683 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
1684 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
1685 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
1686
1687 CL45_RD_OVER_CL22(bp, params->port,
1688 params->phy_addr,
1689 MDIO_REG_BANK_CL73_IEEEB1,
1690 MDIO_CL73_IEEEB1_AN_ADV1,
1691 &ld_pause);
1692 CL45_RD_OVER_CL22(bp, params->port,
1693 params->phy_addr,
1694 MDIO_REG_BANK_CL73_IEEEB1,
1695 MDIO_CL73_IEEEB1_AN_LP_ADV1,
1696 &lp_pause);
1697 pause_result = (ld_pause &
1698 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
1699 >> 8;
1700 pause_result |= (lp_pause &
1701 MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
1702 >> 10;
1703 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
1704 pause_result);
1705 } else {
1706
1707 CL45_RD_OVER_CL22(bp, params->port,
1708 params->phy_addr,
1709 MDIO_REG_BANK_COMBO_IEEE0,
1710 MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1711 &ld_pause);
1712 CL45_RD_OVER_CL22(bp, params->port,
1713 params->phy_addr,
1714 MDIO_REG_BANK_COMBO_IEEE0,
1715 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1716 &lp_pause);
1717 pause_result = (ld_pause &
1718 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
1719 pause_result |= (lp_pause &
1720 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1721 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
1722 pause_result);
1723 }
1724 bnx2x_pause_resolve(vars, pause_result);
1725 } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
1726 (bnx2x_ext_phy_resolve_fc(params, vars))) {
1727 return;
1728 } else {
1729 if (params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
1730 vars->flow_ctrl = params->req_fc_auto_adv;
1731 else
1732 vars->flow_ctrl = params->req_flow_ctrl;
1733 }
1734 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
1735}
1736
1737static void bnx2x_check_fallback_to_cl37(struct link_params *params)
1738{
1739 struct bnx2x *bp = params->bp;
1740	u16 rx_status, ustat_val, cl37_fsm_received;
1741 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
1742 /* Step 1: Make sure signal is detected */
1743 CL45_RD_OVER_CL22(bp, params->port,
1744 params->phy_addr,
1745 MDIO_REG_BANK_RX0,
1746 MDIO_RX0_RX_STATUS,
1747 &rx_status);
1748 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
1749 (MDIO_RX0_RX_STATUS_SIGDET)) {
1750		DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73. "
1751 "rx_status(0x80b0) = 0x%x\n", rx_status);
1752 CL45_WR_OVER_CL22(bp, params->port,
1753 params->phy_addr,
1754 MDIO_REG_BANK_CL73_IEEEB0,
1755 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1756 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
1757 return;
1758 }
1759 /* Step 2: Check CL73 state machine */
1760 CL45_RD_OVER_CL22(bp, params->port,
1761 params->phy_addr,
1762 MDIO_REG_BANK_CL73_USERB0,
1763 MDIO_CL73_USERB0_CL73_USTAT1,
1764 &ustat_val);
1765 if ((ustat_val &
1766 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
1767 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
1768 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
1769 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
1770 DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. "
1771 "ustat_val(0x8371) = 0x%x\n", ustat_val);
1772 return;
1773 }
1774 /* Step 3: Check CL37 Message Pages received to indicate LP
1775 supports only CL37 */
1776 CL45_RD_OVER_CL22(bp, params->port,
1777 params->phy_addr,
1778 MDIO_REG_BANK_REMOTE_PHY,
1779 MDIO_REMOTE_PHY_MISC_RX_STATUS,
1780			      &cl37_fsm_received);
1781	if ((cl37_fsm_received &
1782 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
1783 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
1784 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
1785 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
1786		DP(NETIF_MSG_LINK, "No CL37 FSM messages were received. "
1787			     "misc_rx_status(0x8330) = 0x%x\n",
1788			 cl37_fsm_received);
1789 return;
1790 }
1791	/* The combined cl37/cl73 fsm state information indicates that we are
1792	   connected to a device which does not support cl73, but does support
1793	   cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */
1794 /* Disable CL73 */
1795 CL45_WR_OVER_CL22(bp, params->port,
1796 params->phy_addr,
1797 MDIO_REG_BANK_CL73_IEEEB0,
1798 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1799 0);
1800 /* Restart CL37 autoneg */
1801 bnx2x_restart_autoneg(params, 0);
1802 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
1803}
1804static u8 bnx2x_link_settings_status(struct link_params *params,
1805 struct link_vars *vars,
1806 u32 gp_status,
1807 u8 ext_phy_link_up)
1808{
1809 struct bnx2x *bp = params->bp;
1810 u16 new_line_speed;
1811 u8 rc = 0;
1812 vars->link_status = 0;
1813
1814 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
1815 DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n",
1816 gp_status);
1817
1818 vars->phy_link_up = 1;
1819 vars->link_status |= LINK_STATUS_LINK_UP;
1820
1821 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
1822 vars->duplex = DUPLEX_FULL;
1823 else
1824 vars->duplex = DUPLEX_HALF;
1825
1826 bnx2x_flow_ctrl_resolve(params, vars, gp_status);
1827
1828 switch (gp_status & GP_STATUS_SPEED_MASK) {
1829 case GP_STATUS_10M:
1830 new_line_speed = SPEED_10;
1831 if (vars->duplex == DUPLEX_FULL)
1832 vars->link_status |= LINK_10TFD;
1833 else
1834 vars->link_status |= LINK_10THD;
1835 break;
1836
1837 case GP_STATUS_100M:
1838 new_line_speed = SPEED_100;
1839 if (vars->duplex == DUPLEX_FULL)
1840 vars->link_status |= LINK_100TXFD;
1841 else
1842 vars->link_status |= LINK_100TXHD;
1843 break;
1844
1845 case GP_STATUS_1G:
1846 case GP_STATUS_1G_KX:
1847 new_line_speed = SPEED_1000;
1848 if (vars->duplex == DUPLEX_FULL)
1849 vars->link_status |= LINK_1000TFD;
1850 else
1851 vars->link_status |= LINK_1000THD;
1852 break;
1853
1854 case GP_STATUS_2_5G:
1855 new_line_speed = SPEED_2500;
1856 if (vars->duplex == DUPLEX_FULL)
1857 vars->link_status |= LINK_2500TFD;
1858 else
1859 vars->link_status |= LINK_2500THD;
1860 break;
1861
1862 case GP_STATUS_5G:
1863 case GP_STATUS_6G:
1864 DP(NETIF_MSG_LINK,
1865 "link speed unsupported gp_status 0x%x\n",
1866 gp_status);
1867 return -EINVAL;
1868
1869 case GP_STATUS_10G_KX4:
1870 case GP_STATUS_10G_HIG:
1871 case GP_STATUS_10G_CX4:
1872 new_line_speed = SPEED_10000;
1873 vars->link_status |= LINK_10GTFD;
1874 break;
1875
1876 case GP_STATUS_12G_HIG:
1877 new_line_speed = SPEED_12000;
1878 vars->link_status |= LINK_12GTFD;
1879 break;
1880
1881 case GP_STATUS_12_5G:
1882 new_line_speed = SPEED_12500;
1883 vars->link_status |= LINK_12_5GTFD;
1884 break;
1885
1886 case GP_STATUS_13G:
1887 new_line_speed = SPEED_13000;
1888 vars->link_status |= LINK_13GTFD;
1889 break;
1890
1891 case GP_STATUS_15G:
1892 new_line_speed = SPEED_15000;
1893 vars->link_status |= LINK_15GTFD;
1894 break;
1895
1896 case GP_STATUS_16G:
1897 new_line_speed = SPEED_16000;
1898 vars->link_status |= LINK_16GTFD;
1899 break;
1900
1901 default:
1902 DP(NETIF_MSG_LINK,
1903 "link speed unsupported gp_status 0x%x\n",
1904 gp_status);
1905 return -EINVAL;
1906 }
1907
1908 /* Upon link speed change set the NIG into drain mode.
1909 This deals with a possible FIFO glitch due to clock change
1910 when speed is decreased without a link-down indication */
1911 if (new_line_speed != vars->line_speed) {
1912 if (XGXS_EXT_PHY_TYPE(params->ext_phy_config) !=
1913 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT &&
1914 ext_phy_link_up) {
1915 DP(NETIF_MSG_LINK, "Internal link speed %d is"
1916 " different than the external"
1917 " link speed %d\n", new_line_speed,
1918 vars->line_speed);
1919 vars->phy_link_up = 0;
1920 return 0;
1921 }
1922 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
1923 + params->port*4, 0);
1924 msleep(1);
1925 }
1926 vars->line_speed = new_line_speed;
1927 vars->link_status |= LINK_STATUS_SERDES_LINK;
1928
1929 if ((params->req_line_speed == SPEED_AUTO_NEG) &&
1930 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1931 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
1932 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1933 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
1934 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1935 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
1936 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1937 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) {
1938 vars->autoneg = AUTO_NEG_ENABLED;
1939
1940 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
1941 vars->autoneg |= AUTO_NEG_COMPLETE;
1942 vars->link_status |=
1943 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1944 }
1945
1946 vars->autoneg |= AUTO_NEG_PARALLEL_DETECTION_USED;
1947 vars->link_status |=
1948 LINK_STATUS_PARALLEL_DETECTION_USED;
1949
1950 }
1951 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
1952 vars->link_status |=
1953 LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1954
1955 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
1956 vars->link_status |=
1957 LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1958
1959 } else { /* link_down */
1960 DP(NETIF_MSG_LINK, "phy link down\n");
1961
1962 vars->phy_link_up = 0;
1963
1964 vars->duplex = DUPLEX_FULL;
1965 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1966 vars->autoneg = AUTO_NEG_DISABLED;
1967 vars->mac_type = MAC_TYPE_NONE;
1968
1969 if ((params->req_line_speed == SPEED_AUTO_NEG) &&
1970 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1971 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT))) {
1972 /* Check signal is detected */
1973 bnx2x_check_fallback_to_cl37(params);
1974 }
1975 }
1976
1977 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
1978 gp_status, vars->phy_link_up, vars->line_speed);
1979 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x"
1980 " autoneg 0x%x\n",
1981 vars->duplex,
1982 vars->flow_ctrl, vars->autoneg);
1983 DP(NETIF_MSG_LINK, "link_status 0x%x\n", vars->link_status);
1984
1985 return rc;
1986}
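/* Illustrative note, not part of the upstream file: a worked example of the
 * gp_status decode above.  Assuming a gp_status value in which the
 * LINK_STATUS and DUPLEX_STATUS bits are set and the speed field decodes to
 * GP_STATUS_1G:
 *   - phy_link_up is set and LINK_STATUS_LINK_UP is added to link_status,
 *   - duplex becomes DUPLEX_FULL,
 *   - new_line_speed becomes SPEED_1000 and LINK_1000TFD is added.
 * If an external (non-direct) PHY reports link up at a different speed, the
 * code above instead clears phy_link_up and returns early without updating
 * line_speed.
 */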
1987
1988static void bnx2x_set_gmii_tx_driver(struct link_params *params)
1989{
1990 struct bnx2x *bp = params->bp;
1991 u16 lp_up2;
1992 u16 tx_driver;
1993 u16 bank;
1994
1995 /* read precomp */
1996 CL45_RD_OVER_CL22(bp, params->port,
1997 params->phy_addr,
1998 MDIO_REG_BANK_OVER_1G,
1999 MDIO_OVER_1G_LP_UP2, &lp_up2);
2000
2001 /* bits [10:7] at lp_up2, positioned at [15:12] */
2002 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2003 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2004 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2005
2006 if (lp_up2 == 0)
2007 return;
2008
2009 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
2010 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
2011 CL45_RD_OVER_CL22(bp, params->port,
2012 params->phy_addr,
2013 bank,
2014 MDIO_TX0_TX_DRIVER, &tx_driver);
2015
2016 /* replace tx_driver bits [15:12] */
2017 if (lp_up2 !=
2018 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
2019 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2020 tx_driver |= lp_up2;
2021 CL45_WR_OVER_CL22(bp, params->port,
2022 params->phy_addr,
2023 bank,
2024 MDIO_TX0_TX_DRIVER, tx_driver);
2025 }
2026 }
2027}
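/* Illustrative note, not part of the upstream file: the shift sequence above
 * moves the 4-bit preemphasis field from LP_UP2 bits [10:7] into TX_DRIVER
 * bits [15:12].  For a hypothetical lp_up2 raw value of 0x0380 the [10:7]
 * field is 0x7, which becomes 0x7000 and replaces bits [15:12] of each TX
 * bank's TX_DRIVER register while the lower 12 bits are preserved.  A raw
 * value whose [10:7] field is zero makes the function return without
 * touching any bank.
 */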
2028
2029static u8 bnx2x_emac_program(struct link_params *params,
2030 u32 line_speed, u32 duplex)
2031{
2032 struct bnx2x *bp = params->bp;
2033 u8 port = params->port;
2034 u16 mode = 0;
2035
2036 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2037 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
2038 EMAC_REG_EMAC_MODE,
2039 (EMAC_MODE_25G_MODE |
2040 EMAC_MODE_PORT_MII_10M |
2041 EMAC_MODE_HALF_DUPLEX));
2042 switch (line_speed) {
2043 case SPEED_10:
2044 mode |= EMAC_MODE_PORT_MII_10M;
2045 break;
2046
2047 case SPEED_100:
2048 mode |= EMAC_MODE_PORT_MII;
2049 break;
2050
2051 case SPEED_1000:
2052 mode |= EMAC_MODE_PORT_GMII;
2053 break;
2054
2055 case SPEED_2500:
2056 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2057 break;
2058
2059 default:
2060 /* 10G not valid for EMAC */
2061 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", line_speed);
2062 return -EINVAL;
2063 }
2064
2065 if (duplex == DUPLEX_HALF)
2066 mode |= EMAC_MODE_HALF_DUPLEX;
2067 bnx2x_bits_en(bp,
2068 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2069 mode);
2070
2071 bnx2x_set_led(params, LED_MODE_OPER, line_speed);
2072 return 0;
2073}
2074
2075/*****************************************************************************/
2076/* External Phy section */
2077/*****************************************************************************/
2078void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
2079{
2080 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2081 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2082 msleep(1);
2083 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2084 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
2085}
2086
2087static void bnx2x_ext_phy_reset(struct link_params *params,
2088 struct link_vars *vars)
2089{
2090 struct bnx2x *bp = params->bp;
2091 u32 ext_phy_type;
2092 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2093
2094 DP(NETIF_MSG_LINK, "Port %x: bnx2x_ext_phy_reset\n", params->port);
2095 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2096 /* The PHY reset is controlled by GPIO 1
2097 * Give it 1ms of reset pulse
2098 */
2099 if (vars->phy_flags & PHY_XGXS_FLAG) {
2100
2101 switch (ext_phy_type) {
2102 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2103 DP(NETIF_MSG_LINK, "XGXS Direct\n");
2104 break;
2105
2106 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2107 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2108 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
2109
2110 /* Restore normal power mode*/
2111 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2112 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2113 params->port);
2114
2115 /* HW reset */
2116 bnx2x_ext_phy_hw_reset(bp, params->port);
2117
2118 bnx2x_cl45_write(bp, params->port,
2119 ext_phy_type,
2120 ext_phy_addr,
2121 MDIO_PMA_DEVAD,
2122 MDIO_PMA_REG_CTRL, 0xa040);
2123 break;
2124
2125 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2126 break;
2127
2128 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
2129
2130 /* Restore normal power mode*/
2131 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2132 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2133 params->port);
2134
2135 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2136 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2137 params->port);
2138
2139 bnx2x_cl45_write(bp, params->port,
2140 ext_phy_type,
2141 ext_phy_addr,
2142 MDIO_PMA_DEVAD,
2143 MDIO_PMA_REG_CTRL,
2144 1<<15);
2145 break;
2146
2147 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2148 DP(NETIF_MSG_LINK, "XGXS 8072\n");
2149
2150 /* Unset Low Power Mode and SW reset */
2151 /* Restore normal power mode*/
2152 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2153 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2154 params->port);
2155
2156 bnx2x_cl45_write(bp, params->port,
2157 ext_phy_type,
2158 ext_phy_addr,
2159 MDIO_PMA_DEVAD,
2160 MDIO_PMA_REG_CTRL,
2161 1<<15);
2162 break;
2163
2164 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
2165 DP(NETIF_MSG_LINK, "XGXS 8073\n");
2166
2167 /* Restore normal power mode*/
2168 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2169 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2170 params->port);
2171
2172 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2173 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2174 params->port);
2175 break;
2176
2177 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2178 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
2179
2180 /* Restore normal power mode*/
2181 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2182 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2183 params->port);
2184
2185 /* HW reset */
2186 bnx2x_ext_phy_hw_reset(bp, params->port);
2187 break;
2188
2189 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
2190 /* Restore normal power mode*/
2191 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2192 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2193 params->port);
2194
2195 /* HW reset */
2196 bnx2x_ext_phy_hw_reset(bp, params->port);
2197
2198 bnx2x_cl45_write(bp, params->port,
2199 ext_phy_type,
2200 ext_phy_addr,
2201 MDIO_PMA_DEVAD,
2202 MDIO_PMA_REG_CTRL,
2203 1<<15);
2204 break;
2205 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
2206 break;
2207 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
2208 DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
2209 break;
2210
2211 default:
2212 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2213 params->ext_phy_config);
2214 break;
2215 }
2216
2217 } else { /* SerDes */
2218 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
2219 switch (ext_phy_type) {
2220 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2221 DP(NETIF_MSG_LINK, "SerDes Direct\n");
2222 break;
2223
2224 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2225 DP(NETIF_MSG_LINK, "SerDes 5482\n");
2226 bnx2x_ext_phy_hw_reset(bp, params->port);
2227 break;
2228
2229 default:
2230 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2231 params->ext_phy_config);
2232 break;
2233 }
2234 }
2235}
2236
2237static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
2238 u32 shmem_base, u32 spirom_ver)
2239{
2240 DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
2241 (u16)(spirom_ver>>16), (u16)spirom_ver, port);
2242 REG_WR(bp, shmem_base +
2243 offsetof(struct shmem_region,
2244 port_mb[port].ext_phy_fw_version),
2245 spirom_ver);
2246}
2247
2248static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, u8 port,
2249 u32 ext_phy_type, u8 ext_phy_addr,
2250 u32 shmem_base)
2251{
2252 u16 fw_ver1, fw_ver2;
2253
2254 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD,
2255 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
2256 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD,
2257 MDIO_PMA_REG_ROM_VER2, &fw_ver2);
2258 bnx2x_save_spirom_version(bp, port, shmem_base,
2259 (u32)(fw_ver1<<16 | fw_ver2));
2260}
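/* Illustrative note, not part of the upstream file: the two 16-bit ROM
 * version registers are packed MSW:LSW into one 32-bit word.  For the
 * hypothetical values fw_ver1 = 0x0102 and fw_ver2 = 0x0304 the saved value
 * is 0x01020304, which bnx2x_save_spirom_version() then logs as
 * "FW version 0x102:0x304".
 */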
2261
2262
2263static void bnx2x_save_8481_spirom_version(struct bnx2x *bp, u8 port,
2264 u8 ext_phy_addr, u32 shmem_base)
2265{
2266 u16 val, fw_ver1, fw_ver2, cnt;
2267 /* For the 32 bits registers in 8481, access via MDIO2ARM interface.*/
2268 /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
2269 bnx2x_cl45_write(bp, port,
2270 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2271 ext_phy_addr, MDIO_PMA_DEVAD,
2272 0xA819, 0x0014);
2273 bnx2x_cl45_write(bp, port,
2274 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2275 ext_phy_addr,
2276 MDIO_PMA_DEVAD,
2277 0xA81A,
2278 0xc200);
2279 bnx2x_cl45_write(bp, port,
2280 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2281 ext_phy_addr,
2282 MDIO_PMA_DEVAD,
2283 0xA81B,
2284 0x0000);
2285 bnx2x_cl45_write(bp, port,
2286 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2287 ext_phy_addr,
2288 MDIO_PMA_DEVAD,
2289 0xA81C,
2290 0x0300);
2291 bnx2x_cl45_write(bp, port,
2292 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2293 ext_phy_addr,
2294 MDIO_PMA_DEVAD,
2295 0xA817,
2296 0x0009);
2297
2298 for (cnt = 0; cnt < 100; cnt++) {
2299 bnx2x_cl45_read(bp, port,
2300 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2301 ext_phy_addr,
2302 MDIO_PMA_DEVAD,
2303 0xA818,
2304 &val);
2305 if (val & 1)
2306 break;
2307 udelay(5);
2308 }
2309 if (cnt == 100) {
2310 DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(1)\n");
2311 bnx2x_save_spirom_version(bp, port,
2312 shmem_base, 0);
2313 return;
2314 }
2315
2316
2317 /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
2318 bnx2x_cl45_write(bp, port,
2319 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2320 ext_phy_addr, MDIO_PMA_DEVAD,
2321 0xA819, 0x0000);
2322 bnx2x_cl45_write(bp, port,
2323 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2324 ext_phy_addr, MDIO_PMA_DEVAD,
2325 0xA81A, 0xc200);
2326 bnx2x_cl45_write(bp, port,
2327 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2328 ext_phy_addr, MDIO_PMA_DEVAD,
2329 0xA817, 0x000A);
2330 for (cnt = 0; cnt < 100; cnt++) {
2331 bnx2x_cl45_read(bp, port,
2332 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2333 ext_phy_addr,
2334 MDIO_PMA_DEVAD,
2335 0xA818,
2336 &val);
2337 if (val & 1)
2338 break;
2339 udelay(5);
2340 }
2341 if (cnt == 100) {
2342 DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(2)\n");
2343 bnx2x_save_spirom_version(bp, port,
2344 shmem_base, 0);
2345 return;
2346 }
2347
2348 /* lower 16 bits of the register SPI_FW_STATUS */
2349 bnx2x_cl45_read(bp, port,
2350 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2351 ext_phy_addr,
2352 MDIO_PMA_DEVAD,
2353 0xA81B,
2354 &fw_ver1);
2355 /* upper 16 bits of register SPI_FW_STATUS */
2356 bnx2x_cl45_read(bp, port,
2357 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2358 ext_phy_addr,
2359 MDIO_PMA_DEVAD,
2360 0xA81C,
2361 &fw_ver2);
2362
2363 bnx2x_save_spirom_version(bp, port,
2364 shmem_base, (fw_ver2<<16) | fw_ver1);
2365}
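/* Illustrative sketch, not part of the upstream driver: the two identical
 * "poll bit 0 of register 0xA818" loops above could be factored into a
 * helper such as the one below.  The helper name is hypothetical; the
 * register number, the completion bit and the 100 x 5us polling budget are
 * taken verbatim from the loops above, and a caller would treat a non-zero
 * return the same way the loops handle cnt == 100.
 */
static u8 bnx2x_8481_wait_mdio2arm_done(struct bnx2x *bp, u8 port,
					u8 ext_phy_addr)
{
	u16 val, cnt;

	for (cnt = 0; cnt < 100; cnt++) {
		bnx2x_cl45_read(bp, port,
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
				ext_phy_addr,
				MDIO_PMA_DEVAD,
				0xA818,
				&val);
		if (val & 1)
			return 0;
		udelay(5);
	}
	/* Command did not complete */
	return -EINVAL;
}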
2366
2367static void bnx2x_bcm8072_external_rom_boot(struct link_params *params)
2368{
2369 struct bnx2x *bp = params->bp;
2370 u8 port = params->port;
2371 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2372 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2373
2374 /* Need to wait 200ms after reset */
2375 msleep(200);
2376 /* Boot port from external ROM
2377 * Set ser_boot_ctl bit in the MISC_CTRL1 register
2378 */
2379 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2380 MDIO_PMA_DEVAD,
2381 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2382
2383 /* Reset internal microprocessor */
2384 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2385 MDIO_PMA_DEVAD,
2386 MDIO_PMA_REG_GEN_CTRL,
2387 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2388 /* set micro reset = 0 */
2389 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2390 MDIO_PMA_DEVAD,
2391 MDIO_PMA_REG_GEN_CTRL,
2392 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2393 /* Reset internal microprocessor */
2394 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2395 MDIO_PMA_DEVAD,
2396 MDIO_PMA_REG_GEN_CTRL,
2397 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2398 /* wait for 100ms for code download via SPI port */
2399 msleep(100);
2400
2401 /* Clear ser_boot_ctl bit */
2402 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2403 MDIO_PMA_DEVAD,
2404 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2405 /* Wait 100ms */
2406 msleep(100);
2407
2408 bnx2x_save_bcm_spirom_ver(bp, port,
2409 ext_phy_type,
2410 ext_phy_addr,
2411 params->shmem_base);
2412}
2413
2414static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
2415{
2416 /* This is required only for 8073 A1, firmware version 0x102 */
2417
2418 struct bnx2x *bp = params->bp;
2419 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2420 u16 val;
2421
2422 /* Read 8073 HW revision*/
2423 bnx2x_cl45_read(bp, params->port,
2424 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2425 ext_phy_addr,
2426 MDIO_PMA_DEVAD,
2427 MDIO_PMA_REG_8073_CHIP_REV, &val);
2428
2429 if (val != 1) {
2430 /* No need for the workaround in 8073 A1 */
2431 return 0;
2432 }
2433
2434 bnx2x_cl45_read(bp, params->port,
2435 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2436 ext_phy_addr,
2437 MDIO_PMA_DEVAD,
2438 MDIO_PMA_REG_ROM_VER2, &val);
2439
2440 /* SNR should be applied only for version 0x102 */
2441 if (val != 0x102)
2442 return 0;
2443
2444 return 1;
2445}
2446
2447static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
2448{
2449 struct bnx2x *bp = params->bp;
2450 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2451 u16 val, cnt, cnt1;
2452
2453 bnx2x_cl45_read(bp, params->port,
2454 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2455 ext_phy_addr,
2456 MDIO_PMA_DEVAD,
2457 MDIO_PMA_REG_8073_CHIP_REV, &val);
2458
2459 if (val > 0) {
2460 /* No need for the workaround in 8073 A1 */
2461 return 0;
2462 }
2463 /* XAUI workaround in 8073 A0: */
2464
2465 /* After loading the boot ROM and restarting Autoneg,
2466 poll Dev1, Reg $C820: */
2467
2468 for (cnt = 0; cnt < 1000; cnt++) {
2469 bnx2x_cl45_read(bp, params->port,
2470 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2471 ext_phy_addr,
2472 MDIO_PMA_DEVAD,
2473 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
2474 &val);
2475 /* If bit [14] = 0 or bit [13] = 0, continue on with
2476 system initialization (XAUI work-around not required,
2477 as these bits indicate 2.5G or 1G link up). */
2478 if (!(val & (1<<14)) || !(val & (1<<13))) {
2479 DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
2480 return 0;
2481 } else if (!(val & (1<<15))) {
2482 DP(NETIF_MSG_LINK, "clc bit 15 went off\n");
2483 /* If bit 15 is 0, then poll Dev1, Reg $C841 until
2484 its MSB (bit 15) goes to 1 (indicating that the
2485 XAUI workaround has completed),
2486 then continue on with system initialization.*/
2487 for (cnt1 = 0; cnt1 < 1000; cnt1++) {
2488 bnx2x_cl45_read(bp, params->port,
2489 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2490 ext_phy_addr,
2491 MDIO_PMA_DEVAD,
2492 MDIO_PMA_REG_8073_XAUI_WA, &val);
2493 if (val & (1<<15)) {
2494 DP(NETIF_MSG_LINK,
2495 "XAUI workaround has completed\n");
2496 return 0;
2497 }
2498 msleep(3);
2499 }
2500 break;
2501 }
2502 msleep(3);
2503 }
2504 DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
2505 return -EINVAL;
2506}
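/* Illustrative note, not part of the upstream file: with up to 1000
 * iterations and a 3ms sleep per iteration, each of the two polling loops
 * above gives the XAUI work-around roughly three seconds to complete before
 * the warning is printed and -EINVAL is returned.
 */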
2507
2508static void bnx2x_bcm8073_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port,
2509 u8 ext_phy_addr,
2510 u32 ext_phy_type,
2511 u32 shmem_base)
2512{
2513 /* Boot port from external ROM */
2514 /* EDC grst */
2515 bnx2x_cl45_write(bp, port,
2516 ext_phy_type,
2517 ext_phy_addr,
2518 MDIO_PMA_DEVAD,
2519 MDIO_PMA_REG_GEN_CTRL,
2520 0x0001);
2521
2522 /* ucode reboot and rst */
2523 bnx2x_cl45_write(bp, port,
2524 ext_phy_type,
2525 ext_phy_addr,
2526 MDIO_PMA_DEVAD,
2527 MDIO_PMA_REG_GEN_CTRL,
2528 0x008c);
2529
2530 bnx2x_cl45_write(bp, port,
2531 ext_phy_type,
2532 ext_phy_addr,
2533 MDIO_PMA_DEVAD,
2534 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2535
2536 /* Reset internal microprocessor */
2537 bnx2x_cl45_write(bp, port,
2538 ext_phy_type,
2539 ext_phy_addr,
2540 MDIO_PMA_DEVAD,
2541 MDIO_PMA_REG_GEN_CTRL,
2542 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2543
2544 /* Release srst bit */
2545 bnx2x_cl45_write(bp, port,
2546 ext_phy_type,
2547 ext_phy_addr,
2548 MDIO_PMA_DEVAD,
2549 MDIO_PMA_REG_GEN_CTRL,
2550 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2551
2552 /* wait for 100ms for code download via SPI port */
2553 msleep(100);
2554
2555 /* Clear ser_boot_ctl bit */
2556 bnx2x_cl45_write(bp, port,
2557 ext_phy_type,
2558 ext_phy_addr,
2559 MDIO_PMA_DEVAD,
2560 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2561
2562 bnx2x_save_bcm_spirom_ver(bp, port,
2563 ext_phy_type,
2564 ext_phy_addr,
2565 shmem_base);
2566}
2567
2568static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port,
2569 u8 ext_phy_addr,
2570 u32 shmem_base)
2571{
2572 bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr,
2573 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2574 shmem_base);
2575}
2576
2577static void bnx2x_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port,
2578 u8 ext_phy_addr,
2579 u32 shmem_base)
2580{
2581 bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr,
2582 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
2583 shmem_base);
2584
2585}
2586
2587static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
2588{
2589 struct bnx2x *bp = params->bp;
2590 u8 port = params->port;
2591 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2592 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2593
2594 /* Need to wait 100ms after reset */
2595 msleep(100);
2596
2597 /* Micro controller re-boot */
2598 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2599 MDIO_PMA_DEVAD,
2600 MDIO_PMA_REG_GEN_CTRL,
2601 0x018B);
2602
2603 /* Set soft reset */
2604 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2605 MDIO_PMA_DEVAD,
2606 MDIO_PMA_REG_GEN_CTRL,
2607 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2608
2609 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2610 MDIO_PMA_DEVAD,
2611 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2612
2613 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2614 MDIO_PMA_DEVAD,
2615 MDIO_PMA_REG_GEN_CTRL,
2616 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2617
2618 /* wait for 150ms for microcode load */
2619 msleep(150);
2620
2621 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
2622 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2623 MDIO_PMA_DEVAD,
2624 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2625
2626 msleep(200);
2627 bnx2x_save_bcm_spirom_ver(bp, port,
2628 ext_phy_type,
2629 ext_phy_addr,
2630 params->shmem_base);
2631}
2632
2633static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, u8 port,
2634 u32 ext_phy_type, u8 ext_phy_addr,
2635 u8 tx_en)
2636{
2637 u16 val;
2638
2639 DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
2640 tx_en, port);
2641 /* Disable/Enable transmitter (TX laser of the SFP+ module) */
2642 bnx2x_cl45_read(bp, port,
2643 ext_phy_type,
2644 ext_phy_addr,
2645 MDIO_PMA_DEVAD,
2646 MDIO_PMA_REG_PHY_IDENTIFIER,
2647 &val);
2648
2649 if (tx_en)
2650 val &= ~(1<<15);
2651 else
2652 val |= (1<<15);
2653
2654 bnx2x_cl45_write(bp, port,
2655 ext_phy_type,
2656 ext_phy_addr,
2657 MDIO_PMA_DEVAD,
2658 MDIO_PMA_REG_PHY_IDENTIFIER,
2659 val);
2660}
2661
2662static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
2663 u16 addr, u8 byte_cnt, u8 *o_buf)
2664{
2665 struct bnx2x *bp = params->bp;
2666 u16 val = 0;
2667 u16 i;
2668 u8 port = params->port;
2669 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2670 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2671
2672 if (byte_cnt > 16) {
2673 DP(NETIF_MSG_LINK, "Reading from eeprom is"
2674 " limited to 0xf\n");
2675 return -EINVAL;
2676 }
2677 /* Set the read command byte count */
2678 bnx2x_cl45_write(bp, port,
2679 ext_phy_type,
2680 ext_phy_addr,
2681 MDIO_PMA_DEVAD,
2682 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
2683 (byte_cnt | 0xa000));
2684
2685 /* Set the read command address */
2686 bnx2x_cl45_write(bp, port,
2687 ext_phy_type,
2688 ext_phy_addr,
2689 MDIO_PMA_DEVAD,
2690 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
2691 addr);
2692
2693 /* Activate read command */
2694 bnx2x_cl45_write(bp, port,
2695 ext_phy_type,
2696 ext_phy_addr,
2697 MDIO_PMA_DEVAD,
2698 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2699 0x2c0f);
2700
2701 /* Wait up to 500us for command complete status */
2702 for (i = 0; i < 100; i++) {
2703 bnx2x_cl45_read(bp, port,
2704 ext_phy_type,
2705 ext_phy_addr,
2706 MDIO_PMA_DEVAD,
2707 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2708 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
2709 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
2710 break;
2711 udelay(5);
2712 }
2713
2714 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
2715 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
2716 DP(NETIF_MSG_LINK,
2717 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
2718 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
2719 return -EINVAL;
2720 }
2721
2722 /* Read the buffer */
2723 for (i = 0; i < byte_cnt; i++) {
2724 bnx2x_cl45_read(bp, port,
2725 ext_phy_type,
2726 ext_phy_addr,
2727 MDIO_PMA_DEVAD,
2728 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
2729 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
2730 }
2731
2732 for (i = 0; i < 100; i++) {
2733 bnx2x_cl45_read(bp, port,
2734 ext_phy_type,
2735 ext_phy_addr,
2736 MDIO_PMA_DEVAD,
2737 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2738 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
2739 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
2740 return 0;
2741 msleep(1);
2742 }
2743 return -EINVAL;
2744}
2745
2746static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
2747 u16 addr, u8 byte_cnt, u8 *o_buf)
2748{
2749 struct bnx2x *bp = params->bp;
2750 u16 val, i;
2751 u8 port = params->port;
2752 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2753 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2754
2755 if (byte_cnt > 16) {
2756 DP(NETIF_MSG_LINK, "Reading from eeprom is"
2757 " limited to 0xf\n");
2758 return -EINVAL;
2759 }
2760
2761 /* Need to read from 1.8000 to clear it */
2762 bnx2x_cl45_read(bp, port,
2763 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
2764 ext_phy_addr,
2765 MDIO_PMA_DEVAD,
2766 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2767 &val);
2768
2769 /* Set the read command byte count */
2770 bnx2x_cl45_write(bp, port,
2771 ext_phy_type,
2772 ext_phy_addr,
2773 MDIO_PMA_DEVAD,
2774 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
2775 ((byte_cnt < 2) ? 2 : byte_cnt));
2776
2777 /* Set the read command address */
2778 bnx2x_cl45_write(bp, port,
2779 ext_phy_type,
2780 ext_phy_addr,
2781 MDIO_PMA_DEVAD,
2782 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
2783 addr);
2784 /* Set the destination address */
2785 bnx2x_cl45_write(bp, port,
2786 ext_phy_type,
2787 ext_phy_addr,
2788 MDIO_PMA_DEVAD,
2789 0x8004,
2790 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
2791
2792 /* Activate read command */
2793 bnx2x_cl45_write(bp, port,
2794 ext_phy_type,
2795 ext_phy_addr,
2796 MDIO_PMA_DEVAD,
2797 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2798 0x8002);
2799 /* Wait appropriate time for two-wire command to finish before
2800 polling the status register */
2801 msleep(1);
2802
2803 /* Wait up to 500us for command complete status */
2804 for (i = 0; i < 100; i++) {
2805 bnx2x_cl45_read(bp, port,
2806 ext_phy_type,
2807 ext_phy_addr,
2808 MDIO_PMA_DEVAD,
2809 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2810 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
2811 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
2812 break;
2813 udelay(5);
2814 }
2815
2816 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
2817 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
2818 DP(NETIF_MSG_LINK,
2819 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
2820 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
2821 return -EINVAL;
2822 }
2823
2824 /* Read the buffer */
2825 for (i = 0; i < byte_cnt; i++) {
2826 bnx2x_cl45_read(bp, port,
2827 ext_phy_type,
2828 ext_phy_addr,
2829 MDIO_PMA_DEVAD,
2830 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
2831 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
2832 }
2833
2834 for (i = 0; i < 100; i++) {
2835 bnx2x_cl45_read(bp, port,
2836 ext_phy_type,
2837 ext_phy_addr,
2838 MDIO_PMA_DEVAD,
2839 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2840 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
2841 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
2842 return 0;
2843 msleep(1);
2844 }
2845
2846 return -EINVAL;
2847}
2848
2849u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr,
2850 u8 byte_cnt, u8 *o_buf)
2851{
2852 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2853
2854 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
2855 return bnx2x_8726_read_sfp_module_eeprom(params, addr,
2856 byte_cnt, o_buf);
2857 else if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
2858 return bnx2x_8727_read_sfp_module_eeprom(params, addr,
2859 byte_cnt, o_buf);
2860 return -EINVAL;
2861}
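/* Illustrative sketch, not part of the upstream driver: a minimal usage
 * example of the dispatcher above, reading the SFP+ connector-type byte the
 * same way bnx2x_get_edc_mode() below does.  The wrapper name is
 * hypothetical; the EEPROM address and the error handling mirror that
 * caller.
 */
static u8 bnx2x_example_read_sfp_con_type(struct link_params *params,
					  u8 *con_type)
{
	struct bnx2x *bp = params->bp;

	if (bnx2x_read_sfp_module_eeprom(params,
					 SFP_EEPROM_CON_TYPE_ADDR,
					 1, con_type) != 0) {
		DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
		return -EINVAL;
	}
	return 0;
}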
2862
2863static u8 bnx2x_get_edc_mode(struct link_params *params,
2864 u16 *edc_mode)
2865{
2866 struct bnx2x *bp = params->bp;
2867 u8 val, check_limiting_mode = 0;
2868 *edc_mode = EDC_MODE_LIMITING;
2869
2870 /* First check for copper cable */
2871 if (bnx2x_read_sfp_module_eeprom(params,
2872 SFP_EEPROM_CON_TYPE_ADDR,
2873 1,
2874 &val) != 0) {
2875 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
2876 return -EINVAL;
2877 }
2878
2879 switch (val) {
2880 case SFP_EEPROM_CON_TYPE_VAL_COPPER:
2881 {
2882 u8 copper_module_type;
2883
2884 /* Check if it is an active cable (includes SFP+ module)
2885 or a passive cable */
2886 if (bnx2x_read_sfp_module_eeprom(params,
2887 SFP_EEPROM_FC_TX_TECH_ADDR,
2888 1,
2889 &copper_module_type) !=
2890 0) {
2891 DP(NETIF_MSG_LINK,
2892 "Failed to read copper-cable-type"
2893 " from SFP+ EEPROM\n");
2894 return -EINVAL;
2895 }
2896
2897 if (copper_module_type &
2898 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
2899 DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
2900 check_limiting_mode = 1;
2901 } else if (copper_module_type &
2902 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
2903 DP(NETIF_MSG_LINK, "Passive Copper"
2904 " cable detected\n");
2905 *edc_mode =
2906 EDC_MODE_PASSIVE_DAC;
2907 } else {
2908 DP(NETIF_MSG_LINK, "Unknown copper-cable-"
2909 "type 0x%x !!!\n", copper_module_type);
2910 return -EINVAL;
2911 }
2912 break;
2913 }
2914 case SFP_EEPROM_CON_TYPE_VAL_LC:
2915 DP(NETIF_MSG_LINK, "Optic module detected\n");
2916 check_limiting_mode = 1;
2917 break;
2918 default:
2919 DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
2920 val);
2921 return -EINVAL;
2922 }
2923
2924 if (check_limiting_mode) {
2925 u8 options[SFP_EEPROM_OPTIONS_SIZE];
2926 if (bnx2x_read_sfp_module_eeprom(params,
2927 SFP_EEPROM_OPTIONS_ADDR,
2928 SFP_EEPROM_OPTIONS_SIZE,
2929 options) != 0) {
2930 DP(NETIF_MSG_LINK, "Failed to read Option"
2931 " field from module EEPROM\n");
2932 return -EINVAL;
2933 }
2934 if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK))
2935 *edc_mode = EDC_MODE_LINEAR;
2936 else
2937 *edc_mode = EDC_MODE_LIMITING;
2938 }
2939 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
2940 return 0;
2941}
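/* Illustrative note, not part of the upstream file, summing up the decision
 * above:
 *   - passive copper cable                    -> EDC_MODE_PASSIVE_DAC
 *   - active copper cable or LC optic module  -> read the Options field:
 *       linear RX output bit set              -> EDC_MODE_LINEAR
 *       otherwise                             -> EDC_MODE_LIMITING (default)
 *   - any other connector type                -> -EINVAL
 */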
2942
2943/* This function reads the relevant field from the module (SFP+)
2944 and verifies it is compliant with this board */
2945static u8 bnx2x_verify_sfp_module(struct link_params *params)
2946{
2947 struct bnx2x *bp = params->bp;
2948 u32 val;
2949 u32 fw_resp;
2950 char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
2951 char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
2952
2953 val = REG_RD(bp, params->shmem_base +
2954 offsetof(struct shmem_region, dev_info.
2955 port_feature_config[params->port].config));
2956 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
2957 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) {
2958 DP(NETIF_MSG_LINK, "NOT enforcing module verification\n");
2959 return 0;
2960 }
2961
2962 /* Ask the FW to validate the module */
2963 if (!(params->feature_config_flags &
2964 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY)) {
2965 DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
2966 "verification\n");
2967 return -EINVAL;
2968 }
2969
2970 fw_resp = bnx2x_fw_command(bp, DRV_MSG_CODE_VRFY_OPT_MDL);
2971 if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
2972 DP(NETIF_MSG_LINK, "Approved module\n");
2973 return 0;
2974 }
2975
2976 /* format the warning message */
2977 if (bnx2x_read_sfp_module_eeprom(params,
2978 SFP_EEPROM_VENDOR_NAME_ADDR,
2979 SFP_EEPROM_VENDOR_NAME_SIZE,
2980 (u8 *)vendor_name))
2981 vendor_name[0] = '\0';
2982 else
2983 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
2984 if (bnx2x_read_sfp_module_eeprom(params,
2985 SFP_EEPROM_PART_NO_ADDR,
2986 SFP_EEPROM_PART_NO_SIZE,
2987 (u8 *)vendor_pn))
2988 vendor_pn[0] = '\0';
2989 else
2990 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
2991
2992 netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected, Port %d from %s part number %s\n",
2993 params->port, vendor_name, vendor_pn);
2994 return -EINVAL;
2995}
2996
2997static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
2998 u16 edc_mode)
2999{
3000 struct bnx2x *bp = params->bp;
3001 u8 port = params->port;
3002 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3003 u16 cur_limiting_mode;
3004
3005 bnx2x_cl45_read(bp, port,
3006 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3007 ext_phy_addr,
3008 MDIO_PMA_DEVAD,
3009 MDIO_PMA_REG_ROM_VER2,
3010 &cur_limiting_mode);
3011 DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
3012 cur_limiting_mode);
3013
3014 if (edc_mode == EDC_MODE_LIMITING) {
3015 DP(NETIF_MSG_LINK,
3016 "Setting LIMITING MODE\n");
3017 bnx2x_cl45_write(bp, port,
3018 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3019 ext_phy_addr,
3020 MDIO_PMA_DEVAD,
3021 MDIO_PMA_REG_ROM_VER2,
3022 EDC_MODE_LIMITING);
3023 } else { /* LRM mode ( default )*/
3024
3025 DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
3026
3027 /* Changing to LRM mode takes quite a few seconds.
3028 So do it only if the current mode is limiting
3029 (the default is LRM) */
3030 if (cur_limiting_mode != EDC_MODE_LIMITING)
3031 return 0;
3032
3033 bnx2x_cl45_write(bp, port,
3034 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3035 ext_phy_addr,
3036 MDIO_PMA_DEVAD,
3037 MDIO_PMA_REG_LRM_MODE,
3038 0);
3039 bnx2x_cl45_write(bp, port,
3040 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3041 ext_phy_addr,
3042 MDIO_PMA_DEVAD,
3043 MDIO_PMA_REG_ROM_VER2,
3044 0x128);
3045 bnx2x_cl45_write(bp, port,
3046 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3047 ext_phy_addr,
3048 MDIO_PMA_DEVAD,
3049 MDIO_PMA_REG_MISC_CTRL0,
3050 0x4008);
3051 bnx2x_cl45_write(bp, port,
3052 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3053 ext_phy_addr,
3054 MDIO_PMA_DEVAD,
3055 MDIO_PMA_REG_LRM_MODE,
3056 0xaaaa);
3057 }
3058 return 0;
3059}
3060
3061static u8 bnx2x_bcm8727_set_limiting_mode(struct link_params *params,
3062 u16 edc_mode)
3063{
3064 struct bnx2x *bp = params->bp;
3065 u8 port = params->port;
3066 u16 phy_identifier;
3067 u16 rom_ver2_val;
3068 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3069
3070 bnx2x_cl45_read(bp, port,
3071 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3072 ext_phy_addr,
3073 MDIO_PMA_DEVAD,
3074 MDIO_PMA_REG_PHY_IDENTIFIER,
3075 &phy_identifier);
3076
3077 bnx2x_cl45_write(bp, port,
3078 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3079 ext_phy_addr,
3080 MDIO_PMA_DEVAD,
3081 MDIO_PMA_REG_PHY_IDENTIFIER,
3082 (phy_identifier & ~(1<<9)));
3083
3084 bnx2x_cl45_read(bp, port,
3085 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3086 ext_phy_addr,
3087 MDIO_PMA_DEVAD,
3088 MDIO_PMA_REG_ROM_VER2,
3089 &rom_ver2_val);
3090 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
3091 bnx2x_cl45_write(bp, port,
3092 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3093 ext_phy_addr,
3094 MDIO_PMA_DEVAD,
3095 MDIO_PMA_REG_ROM_VER2,
3096 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
3097
3098 bnx2x_cl45_write(bp, port,
3099 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3100 ext_phy_addr,
3101 MDIO_PMA_DEVAD,
3102 MDIO_PMA_REG_PHY_IDENTIFIER,
3103 (phy_identifier | (1<<9)));
3104
3105 return 0;
3106}
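/* Illustrative note, not part of the upstream file: the ROM_VER2 update
 * above is a read-modify-write that keeps the register's upper byte and
 * replaces only its lower byte with the requested EDC mode.  For a
 * hypothetical rom_ver2_val of 0x1234 and an edc_mode whose low byte is
 * 0x44, the value written back is 0x1244.  Bit 9 of PHY_IDENTIFIER is
 * cleared before the update and set again afterwards.
 */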
3107
3108
3109static u8 bnx2x_wait_for_sfp_module_initialized(struct link_params *params)
3110{
3111 u8 val;
3112 struct bnx2x *bp = params->bp;
3113 u16 timeout;
3114 /* Initialization time after hot-plug may take up to 300ms for some
3115 PHY types (e.g. JDSU) */
3116 for (timeout = 0; timeout < 60; timeout++) {
3117 if (bnx2x_read_sfp_module_eeprom(params, 1, 1, &val)
3118 == 0) {
3119 DP(NETIF_MSG_LINK, "SFP+ module initialization "
3120 "took %d ms\n", timeout * 5);
3121 return 0;
3122 }
3123 msleep(5);
3124 }
3125 return -EINVAL;
3126}
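/* Illustrative note, not part of the upstream file: 60 iterations with a 5ms
 * sleep give the module up to the 300ms mentioned in the comment above, and
 * the debug print reports timeout * 5 as the elapsed time in milliseconds.
 */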
3127
3128static void bnx2x_8727_power_module(struct bnx2x *bp,
3129 struct link_params *params,
3130 u8 ext_phy_addr, u8 is_power_up) {
3131 /* Make sure GPIOs are not used for LED mode */
3132 u16 val;
3133 u8 port = params->port;
3134 /*
3135 * In the GPIO register, bit 4 is used to determine whether the GPIOs
3136 * operate as INPUT (bit 4 = 1) or as OUTPUT (bit 4 = 0).
3137 *
3138 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 is 0.
3139 * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 is 1,
3140 * where the 1st bit is the over-current (input only) and the 2nd bit
3141 * is for power (output only).
3142 */
3143
3144 /*
3145 * In case the NOC feature is disabled and power is up, set the GPIO
3146 * control as input to enable listening for the over-current indication
3147 */
3148
3149 if (!(params->feature_config_flags &
3150 FEATURE_CONFIG_BCM8727_NOC) && is_power_up)
3151 val = (1<<4);
3152 else
3153 /*
3154 * Set GPIO control to OUTPUT, and set the power bit
3155 * according to is_power_up
3156 */
3157 val = ((!(is_power_up)) << 1);
3158
3159 bnx2x_cl45_write(bp, port,
3160 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3161 ext_phy_addr,
3162 MDIO_PMA_DEVAD,
3163 MDIO_PMA_REG_8727_GPIO_CTRL,
3164 val);
3165}
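/* Illustrative note, not part of the upstream file: with the bit layout
 * described above, the value written to the 8727 GPIO control register is
 *   0x10 (bit 4) - power up with the NOC feature disabled: GPIOs as INPUT
 *                  so the over-current indication can be observed,
 *   0x00         - power up with the NOC feature enabled: GPIOs as OUTPUT
 *                  with the power bit (bit 1) cleared,
 *   0x02 (bit 1) - power down: GPIOs as OUTPUT with the power bit set.
 */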
3166
3167static u8 bnx2x_sfp_module_detection(struct link_params *params)
3168{
3169 struct bnx2x *bp = params->bp;
3170 u16 edc_mode;
3171 u8 rc = 0;
3172 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3173 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3174 u32 val = REG_RD(bp, params->shmem_base +
3175 offsetof(struct shmem_region, dev_info.
3176 port_feature_config[params->port].config));
3177
3178 DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
3179 params->port);
3180
3181 if (bnx2x_get_edc_mode(params, &edc_mode) != 0) {
3182 DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
3183 return -EINVAL;
3184 } else if (bnx2x_verify_sfp_module(params) !=
3185 0) {
3186 /* check SFP+ module compatibility */
3187 DP(NETIF_MSG_LINK, "Module verification failed!!\n");
3188 rc = -EINVAL;
3189 /* Turn on fault module-detected led */
3190 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
3191 MISC_REGISTERS_GPIO_HIGH,
3192 params->port);
3193 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
3194 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
3195 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
3196 /* Shutdown SFP+ module */
3197 DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
3198 bnx2x_8727_power_module(bp, params,
3199 ext_phy_addr, 0);
3200 return rc;
3201 }
3202 } else {
3203 /* Turn off fault module-detected led */
3204 DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n");
3205 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
3206 MISC_REGISTERS_GPIO_LOW,
3207 params->port);
3208 }
3209
3210 /* power up the SFP module */
3211 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
3212 bnx2x_8727_power_module(bp, params, ext_phy_addr, 1);
3213
3214 /* Check and set limiting mode / LRM mode on 8726.
3215 On 8727 it is done automatically */
3216 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
3217 bnx2x_bcm8726_set_limiting_mode(params, edc_mode);
3218 else
3219 bnx2x_bcm8727_set_limiting_mode(params, edc_mode);
3220 /*
3221 * Enable transmit for this module if the module is approved, or
3222 * if unapproved modules should also enable the Tx laser
3223 */
3224 if (rc == 0 ||
3225 (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
3226 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
3227 bnx2x_sfp_set_transmitter(bp, params->port,
3228 ext_phy_type, ext_phy_addr, 1);
3229 else
3230 bnx2x_sfp_set_transmitter(bp, params->port,
3231 ext_phy_type, ext_phy_addr, 0);
3232
3233 return rc;
3234}
3235
3236void bnx2x_handle_module_detect_int(struct link_params *params)
3237{
3238 struct bnx2x *bp = params->bp;
3239 u32 gpio_val;
3240 u8 port = params->port;
3241
3242 /* Set valid module led off */
3243 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
3244 MISC_REGISTERS_GPIO_HIGH,
3245 params->port);
3246
3247 /* Get current gpio val reflecting module plugged in / out */
3248 gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
3249
3250 /* Call the handling function in case module is detected */
3251 if (gpio_val == 0) {
3252
3253 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
3254 MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
3255 port);
3256
3257 if (bnx2x_wait_for_sfp_module_initialized(params) ==
3258 0)
3259 bnx2x_sfp_module_detection(params);
3260 else
3261 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
3262 } else {
3263 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3264
3265 u32 ext_phy_type =
3266 XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3267 u32 val = REG_RD(bp, params->shmem_base +
3268 offsetof(struct shmem_region, dev_info.
3269 port_feature_config[params->port].
3270 config));
3271
3272 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
3273 MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
3274 port);
3275 /* Module was plugged out. */
3276 /* Disable transmit for this module */
3277 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
3278 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
3279 bnx2x_sfp_set_transmitter(bp, params->port,
3280 ext_phy_type, ext_phy_addr, 0);
3281 }
3282}
3283
3284static void bnx2x_bcm807x_force_10G(struct link_params *params)
3285{
3286 struct bnx2x *bp = params->bp;
3287 u8 port = params->port;
3288 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3289 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3290
3291 /* Force KR or KX */
3292 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3293 MDIO_PMA_DEVAD,
3294 MDIO_PMA_REG_CTRL,
3295 0x2040);
3296 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3297 MDIO_PMA_DEVAD,
3298 MDIO_PMA_REG_10G_CTRL2,
3299 0x000b);
3300 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3301 MDIO_PMA_DEVAD,
3302 MDIO_PMA_REG_BCM_CTRL,
3303 0x0000);
3304 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3305 MDIO_AN_DEVAD,
3306 MDIO_AN_REG_CTRL,
3307 0x0000);
3308}
3309
3310static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
3311{
3312 struct bnx2x *bp = params->bp;
3313 u8 port = params->port;
3314 u16 val;
3315 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3316 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3317
3318 bnx2x_cl45_read(bp, params->port,
3319 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
3320 ext_phy_addr,
3321 MDIO_PMA_DEVAD,
3322 MDIO_PMA_REG_8073_CHIP_REV, &val);
3323
3324 if (val == 0) {
3325 /* Mustn't set low power mode in 8073 A0 */
3326 return;
3327 }
3328
3329 /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
3330 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
3331 MDIO_XS_DEVAD,
3332 MDIO_XS_PLL_SEQUENCER, &val);
3333 val &= ~(1<<13);
3334 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3335 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3336
3337 /* PLL controls */
3338 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3339 MDIO_XS_DEVAD, 0x805E, 0x1077);
3340 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3341 MDIO_XS_DEVAD, 0x805D, 0x0000);
3342 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3343 MDIO_XS_DEVAD, 0x805C, 0x030B);
3344 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3345 MDIO_XS_DEVAD, 0x805B, 0x1240);
3346 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3347 MDIO_XS_DEVAD, 0x805A, 0x2490);
3348
3349 /* Tx Controls */
3350 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3351 MDIO_XS_DEVAD, 0x80A7, 0x0C74);
3352 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3353 MDIO_XS_DEVAD, 0x80A6, 0x9041);
3354 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3355 MDIO_XS_DEVAD, 0x80A5, 0x4640);
3356
3357 /* Rx Controls */
3358 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3359 MDIO_XS_DEVAD, 0x80FE, 0x01C4);
3360 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3361 MDIO_XS_DEVAD, 0x80FD, 0x9249);
3362 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3363 MDIO_XS_DEVAD, 0x80FC, 0x2015);
3364
3365 /* Enable PLL sequencer (use read-modify-write to set bit 13) */
3366 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
3367 MDIO_XS_DEVAD,
3368 MDIO_XS_PLL_SEQUENCER, &val);
3369 val |= (1<<13);
3370 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3371 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3372}
3373
3374static void bnx2x_8073_set_pause_cl37(struct link_params *params,
3375 struct link_vars *vars)
3376{
3377 struct bnx2x *bp = params->bp;
3378 u16 cl37_val;
3379 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3380 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3381
3382 bnx2x_cl45_read(bp, params->port,
3383 ext_phy_type,
3384 ext_phy_addr,
3385 MDIO_AN_DEVAD,
3386 MDIO_AN_REG_CL37_FC_LD, &cl37_val);
3387
3388 cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3389 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3390
3391 if ((vars->ieee_fc &
3392 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
3393 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
3394 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
3395 }
3396 if ((vars->ieee_fc &
3397 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
3398 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
3399 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3400 }
3401 if ((vars->ieee_fc &
3402 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
3403 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
3404 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3405 }
3406 DP(NETIF_MSG_LINK,
3407 "Ext phy AN advertize cl37 0x%x\n", cl37_val);
3408
3409 bnx2x_cl45_write(bp, params->port,
3410 ext_phy_type,
3411 ext_phy_addr,
3412 MDIO_AN_DEVAD,
3413 MDIO_AN_REG_CL37_FC_LD, cl37_val);
3414 msleep(500);
3415}
3416
3417static void bnx2x_ext_phy_set_pause(struct link_params *params,
3418 struct link_vars *vars)
3419{
3420 struct bnx2x *bp = params->bp;
3421 u16 val;
3422 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3423 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3424
3425 /* read modify write pause advertizing */
3426 bnx2x_cl45_read(bp, params->port,
3427 ext_phy_type,
3428 ext_phy_addr,
3429 MDIO_AN_DEVAD,
3430 MDIO_AN_REG_ADV_PAUSE, &val);
3431
3432 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
3433
3434 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3435
3436 if ((vars->ieee_fc &
3437 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
3438 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
3439 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
3440 }
3441 if ((vars->ieee_fc &
3442 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
3443 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
3444 val |=
3445 MDIO_AN_REG_ADV_PAUSE_PAUSE;
3446 }
3447 DP(NETIF_MSG_LINK,
3448 "Ext phy AN advertize 0x%x\n", val);
3449 bnx2x_cl45_write(bp, params->port,
3450 ext_phy_type,
3451 ext_phy_addr,
3452 MDIO_AN_DEVAD,
3453 MDIO_AN_REG_ADV_PAUSE, val);
3454}
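/* Illustrative note, not part of the upstream file: the read-modify-write
 * above mirrors the local ieee_fc bits onto the external PHY's AN pause
 * advertisement register:
 *   - the ASYMMETRIC bit set in ieee_fc       -> advertise ADV_PAUSE_ASYMMETRIC,
 *   - both SYMMETRIC and ASYMMETRIC bits set  -> additionally advertise
 *                                                ADV_PAUSE_PAUSE,
 * following the pause encoding of Table 28B-3 referenced in the comment.
 */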
3455static void bnx2x_set_preemphasis(struct link_params *params)
3456{
3457 u16 bank, i = 0;
3458 struct bnx2x *bp = params->bp;
3459
3460 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
3461 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
3462 CL45_WR_OVER_CL22(bp, params->port,
3463 params->phy_addr,
3464 bank,
3465 MDIO_RX0_RX_EQ_BOOST,
3466 params->xgxs_config_rx[i]);
3467 }
3468
3469 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
3470 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
3471 CL45_WR_OVER_CL22(bp, params->port,
3472 params->phy_addr,
3473 bank,
3474 MDIO_TX0_TX_DRIVER,
3475 params->xgxs_config_tx[i]);
3476 }
3477}
3478
3479
3480static void bnx2x_8481_set_led4(struct link_params *params,
3481 u32 ext_phy_type, u8 ext_phy_addr)
3482{
3483 struct bnx2x *bp = params->bp;
3484
3485 /* PHYC_CTL_LED_CTL */
3486 bnx2x_cl45_write(bp, params->port,
3487 ext_phy_type,
3488 ext_phy_addr,
3489 MDIO_PMA_DEVAD,
3490 MDIO_PMA_REG_8481_LINK_SIGNAL, 0xa482);
3491
3492 /* Unmask LED4 for 10G link */
3493 bnx2x_cl45_write(bp, params->port,
3494 ext_phy_type,
3495 ext_phy_addr,
3496 MDIO_PMA_DEVAD,
3497 MDIO_PMA_REG_8481_SIGNAL_MASK, (1<<6));
3498 /* 'Interrupt Mask' */
3499 bnx2x_cl45_write(bp, params->port,
3500 ext_phy_type,
3501 ext_phy_addr,
3502 MDIO_AN_DEVAD,
3503 0xFFFB, 0xFFFD);
3504}
3505static void bnx2x_8481_set_legacy_led_mode(struct link_params *params,
3506 u32 ext_phy_type, u8 ext_phy_addr)
3507{
3508 struct bnx2x *bp = params->bp;
3509
3510 /* LED1 (10G Link): Disable LED1 when 10/100/1000 link */
3511 /* LED2 (1G/100/10 Link): Enable LED2 when 10/100/1000 link */
3512 bnx2x_cl45_write(bp, params->port,
3513 ext_phy_type,
3514 ext_phy_addr,
3515 MDIO_AN_DEVAD,
3516 MDIO_AN_REG_8481_LEGACY_SHADOW,
3517 (1<<15) | (0xd << 10) | (0xc<<4) | 0xe);
3518}
3519
3520static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
3521 u32 ext_phy_type, u8 ext_phy_addr)
3522{
3523 struct bnx2x *bp = params->bp;
3524 u16 val1;
3525
3526 /* LED1 (10G Link) */
3527 /* Enable continuous mode based on source 7 (10G link) */
3528 bnx2x_cl45_read(bp, params->port,
3529 ext_phy_type,
3530 ext_phy_addr,
3531 MDIO_PMA_DEVAD,
3532 MDIO_PMA_REG_8481_LINK_SIGNAL,
3533 &val1);
3534 /* Set bit 2 to 0, and bits [1:0] to 10 */
3535 val1 &= ~((1<<0) | (1<<2) | (1<<7)); /* Clear bits 0,2,7*/
3536 val1 |= ((1<<1) | (1<<6)); /* Set bit 1, 6 */
3537
3538 bnx2x_cl45_write(bp, params->port,
3539 ext_phy_type,
3540 ext_phy_addr,
3541 MDIO_PMA_DEVAD,
3542 MDIO_PMA_REG_8481_LINK_SIGNAL,
3543 val1);
3544
3545 /* Unmask LED1 for 10G link */
3546 bnx2x_cl45_read(bp, params->port,
3547 ext_phy_type,
3548 ext_phy_addr,
3549 MDIO_PMA_DEVAD,
3550 MDIO_PMA_REG_8481_LED1_MASK,
3551 &val1);
3552 /* Set bit 7 to unmask LED1 for 10G link */
3553 val1 |= (1<<7);
3554 bnx2x_cl45_write(bp, params->port,
3555 ext_phy_type,
3556 ext_phy_addr,
3557 MDIO_PMA_DEVAD,
3558 MDIO_PMA_REG_8481_LED1_MASK,
3559 val1);
3560
3561 /* LED2 (1G/100/10G Link) */
3562 /* Mask LED2 for 10G link */
3563 bnx2x_cl45_write(bp, params->port,
3564 ext_phy_type,
3565 ext_phy_addr,
3566 MDIO_PMA_DEVAD,
3567 MDIO_PMA_REG_8481_LED2_MASK,
3568 0);
3569
3570 /* Unmask LED3 for 10G link */
3571 bnx2x_cl45_write(bp, params->port,
3572 ext_phy_type,
3573 ext_phy_addr,
3574 MDIO_PMA_DEVAD,
3575 MDIO_PMA_REG_8481_LED3_MASK,
3576 0x6);
3577 bnx2x_cl45_write(bp, params->port,
3578 ext_phy_type,
3579 ext_phy_addr,
3580 MDIO_PMA_DEVAD,
3581 MDIO_PMA_REG_8481_LED3_BLINK,
3582 0);
3583}
3584
3585
3586static void bnx2x_init_internal_phy(struct link_params *params,
3587 struct link_vars *vars,
3588 u8 enable_cl73)
3589{
3590 struct bnx2x *bp = params->bp;
3591
3592 if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
3593 if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
3594 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3595 (params->feature_config_flags &
3596 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
3597 bnx2x_set_preemphasis(params);
3598
3599 /* forced speed requested? */
3600 if (vars->line_speed != SPEED_AUTO_NEG ||
3601 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
3602 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3603 params->loopback_mode == LOOPBACK_EXT)) {
3604 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3605
3606 /* disable autoneg */
3607 bnx2x_set_autoneg(params, vars, 0);
3608
3609 /* program speed and duplex */
3610 bnx2x_program_serdes(params, vars);
3611
3612 } else { /* AN_mode */
3613 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3614
3615 /* AN enabled */
3616 bnx2x_set_brcm_cl37_advertisment(params);
3617
3618 /* program duplex & pause advertisement (for aneg) */
3619 bnx2x_set_ieee_aneg_advertisment(params,
3620 vars->ieee_fc);
3621
3622 /* enable autoneg */
3623 bnx2x_set_autoneg(params, vars, enable_cl73);
3624
3625 /* enable and restart AN */
3626 bnx2x_restart_autoneg(params, enable_cl73);
3627 }
3628
3629 } else { /* SGMII mode */
3630 DP(NETIF_MSG_LINK, "SGMII\n");
3631
3632 bnx2x_initialize_sgmii_process(params, vars);
3633 }
3634}
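/* Illustrative note, not part of the upstream file, summing up the flow
 * above:
 *   - SGMII mode (PHY_SGMII_FLAG set)  -> bnx2x_initialize_sgmii_process(),
 *   - forced speed, or a direct XGXS in external loopback
 *                                      -> disable autoneg and program the
 *                                         SerDes speed/duplex,
 *   - otherwise (autoneg)              -> set the CL37 and IEEE
 *                                         advertisements, then enable and
 *                                         restart autoneg (optionally CL73).
 */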
3635
3636static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3637{
3638 struct bnx2x *bp = params->bp;
3639 u32 ext_phy_type;
3640 u8 ext_phy_addr;
3641 u16 cnt;
3642 u16 ctrl = 0;
3643 u16 val = 0;
3644 u8 rc = 0;
3645
3646 if (vars->phy_flags & PHY_XGXS_FLAG) {
3647 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3648
3649 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3650 /* Make sure that the soft reset is off (except for the 8072:
3651 * due to the lock, it will be done inside the specific
3652 * handling)
3653 */
3654 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3655 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3656 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3657 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) &&
3658 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) {
3659 /* Wait for soft reset to get cleared up to 1 sec */
3660 for (cnt = 0; cnt < 1000; cnt++) {
3661 bnx2x_cl45_read(bp, params->port,
3662 ext_phy_type,
3663 ext_phy_addr,
3664 MDIO_PMA_DEVAD,
3665 MDIO_PMA_REG_CTRL, &ctrl);
3666 if (!(ctrl & (1<<15)))
3667 break;
3668 msleep(1);
3669 }
3670 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n",
3671 ctrl, cnt);
3672 }
3673
3674 switch (ext_phy_type) {
3675 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3676 break;
3677
3678 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3679 DP(NETIF_MSG_LINK, "XGXS 8705\n");
3680
3681 bnx2x_cl45_write(bp, params->port,
3682 ext_phy_type,
3683 ext_phy_addr,
3684 MDIO_PMA_DEVAD,
3685 MDIO_PMA_REG_MISC_CTRL,
3686 0x8288);
3687 bnx2x_cl45_write(bp, params->port,
3688 ext_phy_type,
3689 ext_phy_addr,
3690 MDIO_PMA_DEVAD,
3691 MDIO_PMA_REG_PHY_IDENTIFIER,
3692 0x7fbf);
3693 bnx2x_cl45_write(bp, params->port,
3694 ext_phy_type,
3695 ext_phy_addr,
3696 MDIO_PMA_DEVAD,
3697 MDIO_PMA_REG_CMU_PLL_BYPASS,
3698 0x0100);
3699 bnx2x_cl45_write(bp, params->port,
3700 ext_phy_type,
3701 ext_phy_addr,
3702 MDIO_WIS_DEVAD,
3703 MDIO_WIS_REG_LASI_CNTL, 0x1);
3704
3705 /* BCM8705 doesn't have microcode, hence the 0 */
3706 bnx2x_save_spirom_version(bp, params->port,
3707 params->shmem_base, 0);
3708 break;
3709
3710 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3711 /* Wait until fw is loaded */
3712 for (cnt = 0; cnt < 100; cnt++) {
3713 bnx2x_cl45_read(bp, params->port, ext_phy_type,
3714 ext_phy_addr, MDIO_PMA_DEVAD,
3715 MDIO_PMA_REG_ROM_VER1, &val);
3716 if (val)
3717 break;
3718 msleep(10);
3719 }
3720 DP(NETIF_MSG_LINK, "XGXS 8706 is initialized "
3721 "after %d ms\n", cnt);
3722 if ((params->feature_config_flags &
3723 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
3724 u8 i;
3725 u16 reg;
3726 for (i = 0; i < 4; i++) {
3727 reg = MDIO_XS_8706_REG_BANK_RX0 +
3728 i*(MDIO_XS_8706_REG_BANK_RX1 -
3729 MDIO_XS_8706_REG_BANK_RX0);
3730 bnx2x_cl45_read(bp, params->port,
3731 ext_phy_type,
3732 ext_phy_addr,
3733 MDIO_XS_DEVAD,
3734 reg, &val);
3735 /* Clear first 3 bits of the control */
3736 val &= ~0x7;
3737 /* Set control bits according to
3738 configuration */
3739 val |= (params->xgxs_config_rx[i] &
3740 0x7);
3741 DP(NETIF_MSG_LINK, "Setting RX "
3742 "Equalizer to BCM8706 reg 0x%x"
3743 " <-- val 0x%x\n", reg, val);
3744 bnx2x_cl45_write(bp, params->port,
3745 ext_phy_type,
3746 ext_phy_addr,
3747 MDIO_XS_DEVAD,
3748 reg, val);
3749 }
3750 }
3751 /* Force speed */
3752 if (params->req_line_speed == SPEED_10000) {
3753 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
3754
3755 bnx2x_cl45_write(bp, params->port,
3756 ext_phy_type,
3757 ext_phy_addr,
3758 MDIO_PMA_DEVAD,
3759 MDIO_PMA_REG_DIGITAL_CTRL,
3760 0x400);
3761 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3762 ext_phy_addr, MDIO_PMA_DEVAD,
3763 MDIO_PMA_REG_LASI_CTRL, 1);
3764 } else {
3765 /* Force 1Gbps using autoneg with 1G
3766 advertisement */
3767
3768 /* Allow CL37 through CL73 */
3769 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3770 bnx2x_cl45_write(bp, params->port,
3771 ext_phy_type,
3772 ext_phy_addr,
3773 MDIO_AN_DEVAD,
3774 MDIO_AN_REG_CL37_CL73,
3775 0x040c);
3776
3777			/* Enable Full-Duplex advertisement on CL37 */
3778 bnx2x_cl45_write(bp, params->port,
3779 ext_phy_type,
3780 ext_phy_addr,
3781 MDIO_AN_DEVAD,
3782 MDIO_AN_REG_CL37_FC_LP,
3783 0x0020);
3784 /* Enable CL37 AN */
3785 bnx2x_cl45_write(bp, params->port,
3786 ext_phy_type,
3787 ext_phy_addr,
3788 MDIO_AN_DEVAD,
3789 MDIO_AN_REG_CL37_AN,
3790 0x1000);
3791 /* 1G support */
3792 bnx2x_cl45_write(bp, params->port,
3793 ext_phy_type,
3794 ext_phy_addr,
3795 MDIO_AN_DEVAD,
3796 MDIO_AN_REG_ADV, (1<<5));
3797
3798 /* Enable clause 73 AN */
3799 bnx2x_cl45_write(bp, params->port,
3800 ext_phy_type,
3801 ext_phy_addr,
3802 MDIO_AN_DEVAD,
3803 MDIO_AN_REG_CTRL,
3804 0x1200);
3805 bnx2x_cl45_write(bp, params->port,
3806 ext_phy_type,
3807 ext_phy_addr,
3808 MDIO_PMA_DEVAD,
3809 MDIO_PMA_REG_RX_ALARM_CTRL,
3810 0x0400);
3811 bnx2x_cl45_write(bp, params->port,
3812 ext_phy_type,
3813 ext_phy_addr,
3814 MDIO_PMA_DEVAD,
3815 MDIO_PMA_REG_LASI_CTRL, 0x0004);
3816
3817 }
3818 bnx2x_save_bcm_spirom_ver(bp, params->port,
3819 ext_phy_type,
3820 ext_phy_addr,
3821 params->shmem_base);
3822 break;
3823 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
3824 DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
3825 bnx2x_bcm8726_external_rom_boot(params);
3826
3827		/* Need to call module detection on initialization since
3828		   the module detection triggered by actual module
3829		   insertion might occur before the driver is loaded; when
3830		   the driver is loaded, it resets all registers, including
3831		   the transmitter */
3832 bnx2x_sfp_module_detection(params);
3833
3834 /* Set Flow control */
3835 bnx2x_ext_phy_set_pause(params, vars);
3836 if (params->req_line_speed == SPEED_1000) {
3837 DP(NETIF_MSG_LINK, "Setting 1G force\n");
3838 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3839 ext_phy_addr, MDIO_PMA_DEVAD,
3840 MDIO_PMA_REG_CTRL, 0x40);
3841 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3842 ext_phy_addr, MDIO_PMA_DEVAD,
3843 MDIO_PMA_REG_10G_CTRL2, 0xD);
3844 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3845 ext_phy_addr, MDIO_PMA_DEVAD,
3846 MDIO_PMA_REG_LASI_CTRL, 0x5);
3847 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3848 ext_phy_addr, MDIO_PMA_DEVAD,
3849 MDIO_PMA_REG_RX_ALARM_CTRL,
3850 0x400);
3851 } else if ((params->req_line_speed ==
3852 SPEED_AUTO_NEG) &&
3853 ((params->speed_cap_mask &
3854 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
3855 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
3856 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3857 ext_phy_addr, MDIO_AN_DEVAD,
3858 MDIO_AN_REG_ADV, 0x20);
3859 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3860 ext_phy_addr, MDIO_AN_DEVAD,
3861 MDIO_AN_REG_CL37_CL73, 0x040c);
3862 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3863 ext_phy_addr, MDIO_AN_DEVAD,
3864 MDIO_AN_REG_CL37_FC_LD, 0x0020);
3865 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3866 ext_phy_addr, MDIO_AN_DEVAD,
3867 MDIO_AN_REG_CL37_AN, 0x1000);
3868 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3869 ext_phy_addr, MDIO_AN_DEVAD,
3870 MDIO_AN_REG_CTRL, 0x1200);
3871
3872 /* Enable RX-ALARM control to receive
3873 interrupt for 1G speed change */
3874 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3875 ext_phy_addr, MDIO_PMA_DEVAD,
3876 MDIO_PMA_REG_LASI_CTRL, 0x4);
3877 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3878 ext_phy_addr, MDIO_PMA_DEVAD,
3879 MDIO_PMA_REG_RX_ALARM_CTRL,
3880 0x400);
3881
3882 } else { /* Default 10G. Set only LASI control */
3883 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3884 ext_phy_addr, MDIO_PMA_DEVAD,
3885 MDIO_PMA_REG_LASI_CTRL, 1);
3886 }
3887
3888 /* Set TX PreEmphasis if needed */
3889 if ((params->feature_config_flags &
3890 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
3891			DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, "
3892 "TX_CTRL2 0x%x\n",
3893 params->xgxs_config_tx[0],
3894 params->xgxs_config_tx[1]);
3895 bnx2x_cl45_write(bp, params->port,
3896 ext_phy_type,
3897 ext_phy_addr,
3898 MDIO_PMA_DEVAD,
3899 MDIO_PMA_REG_8726_TX_CTRL1,
3900 params->xgxs_config_tx[0]);
3901
3902 bnx2x_cl45_write(bp, params->port,
3903 ext_phy_type,
3904 ext_phy_addr,
3905 MDIO_PMA_DEVAD,
3906 MDIO_PMA_REG_8726_TX_CTRL2,
3907 params->xgxs_config_tx[1]);
3908 }
3909 break;
3910 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3911 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
3912 {
3913 u16 tmp1;
3914 u16 rx_alarm_ctrl_val;
3915 u16 lasi_ctrl_val;
3916 if (ext_phy_type ==
3917 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
3918 rx_alarm_ctrl_val = 0x400;
3919 lasi_ctrl_val = 0x0004;
3920 } else {
3921 rx_alarm_ctrl_val = (1<<2);
3922 lasi_ctrl_val = 0x0004;
3923 }
3924
3925 /* enable LASI */
3926 bnx2x_cl45_write(bp, params->port,
3927 ext_phy_type,
3928 ext_phy_addr,
3929 MDIO_PMA_DEVAD,
3930 MDIO_PMA_REG_RX_ALARM_CTRL,
3931 rx_alarm_ctrl_val);
3932
3933 bnx2x_cl45_write(bp, params->port,
3934 ext_phy_type,
3935 ext_phy_addr,
3936 MDIO_PMA_DEVAD,
3937 MDIO_PMA_REG_LASI_CTRL,
3938 lasi_ctrl_val);
3939
3940 bnx2x_8073_set_pause_cl37(params, vars);
3941
3942 if (ext_phy_type ==
3943 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)
3944 bnx2x_bcm8072_external_rom_boot(params);
3945 else
3946 /* In case of 8073 with long xaui lines,
3947 don't set the 8073 xaui low power*/
3948 bnx2x_bcm8073_set_xaui_low_power_mode(params);
3949
3950 bnx2x_cl45_read(bp, params->port,
3951 ext_phy_type,
3952 ext_phy_addr,
3953 MDIO_PMA_DEVAD,
3954 MDIO_PMA_REG_M8051_MSGOUT_REG,
3955 &tmp1);
3956
3957 bnx2x_cl45_read(bp, params->port,
3958 ext_phy_type,
3959 ext_phy_addr,
3960 MDIO_PMA_DEVAD,
3961 MDIO_PMA_REG_RX_ALARM, &tmp1);
3962
3963 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1):"
3964 "0x%x\n", tmp1);
3965
3966 /* If this is forced speed, set to KR or KX
3967 * (all other are not supported)
3968 */
3969 if (params->loopback_mode == LOOPBACK_EXT) {
3970 bnx2x_bcm807x_force_10G(params);
3971 DP(NETIF_MSG_LINK,
3972 "Forced speed 10G on 807X\n");
3973 break;
3974 } else {
3975 bnx2x_cl45_write(bp, params->port,
3976 ext_phy_type, ext_phy_addr,
3977 MDIO_PMA_DEVAD,
3978 MDIO_PMA_REG_BCM_CTRL,
3979 0x0002);
3980 }
3981 if (params->req_line_speed != SPEED_AUTO_NEG) {
3982 if (params->req_line_speed == SPEED_10000) {
3983 val = (1<<7);
3984 } else if (params->req_line_speed ==
3985 SPEED_2500) {
3986 val = (1<<5);
3987 /* Note that 2.5G works only
3988				   when used with 1G advertisement */
3989 } else
3990 val = (1<<5);
3991 } else {
3992
3993 val = 0;
3994 if (params->speed_cap_mask &
3995 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
3996 val |= (1<<7);
3997
3998 /* Note that 2.5G works only when
3999			   used with 1G advertisement */
4000 if (params->speed_cap_mask &
4001 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
4002 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
4003 val |= (1<<5);
4004 DP(NETIF_MSG_LINK,
4005 "807x autoneg val = 0x%x\n", val);
4006 }
4007
4008 bnx2x_cl45_write(bp, params->port,
4009 ext_phy_type,
4010 ext_phy_addr,
4011 MDIO_AN_DEVAD,
4012 MDIO_AN_REG_ADV, val);
4013 if (ext_phy_type ==
4014 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
4015 bnx2x_cl45_read(bp, params->port,
4016 ext_phy_type,
4017 ext_phy_addr,
4018 MDIO_AN_DEVAD,
4019 MDIO_AN_REG_8073_2_5G, &tmp1);
4020
4021 if (((params->speed_cap_mask &
4022 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
4023 (params->req_line_speed ==
4024 SPEED_AUTO_NEG)) ||
4025 (params->req_line_speed ==
4026 SPEED_2500)) {
4027 u16 phy_ver;
4028 /* Allow 2.5G for A1 and above */
4029 bnx2x_cl45_read(bp, params->port,
4030 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4031 ext_phy_addr,
4032 MDIO_PMA_DEVAD,
4033 MDIO_PMA_REG_8073_CHIP_REV, &phy_ver);
4034 DP(NETIF_MSG_LINK, "Add 2.5G\n");
4035 if (phy_ver > 0)
4036 tmp1 |= 1;
4037 else
4038 tmp1 &= 0xfffe;
4039 } else {
4040 DP(NETIF_MSG_LINK, "Disable 2.5G\n");
4041 tmp1 &= 0xfffe;
4042 }
4043
4044 bnx2x_cl45_write(bp, params->port,
4045 ext_phy_type,
4046 ext_phy_addr,
4047 MDIO_AN_DEVAD,
4048 MDIO_AN_REG_8073_2_5G, tmp1);
4049 }
4050
4051 /* Add support for CL37 (passive mode) II */
4052
4053 bnx2x_cl45_read(bp, params->port,
4054 ext_phy_type,
4055 ext_phy_addr,
4056 MDIO_AN_DEVAD,
4057 MDIO_AN_REG_CL37_FC_LD,
4058 &tmp1);
4059
4060 bnx2x_cl45_write(bp, params->port,
4061 ext_phy_type,
4062 ext_phy_addr,
4063 MDIO_AN_DEVAD,
4064 MDIO_AN_REG_CL37_FC_LD, (tmp1 |
4065 ((params->req_duplex == DUPLEX_FULL) ?
4066 0x20 : 0x40)));
4067
4068 /* Add support for CL37 (passive mode) III */
4069 bnx2x_cl45_write(bp, params->port,
4070 ext_phy_type,
4071 ext_phy_addr,
4072 MDIO_AN_DEVAD,
4073 MDIO_AN_REG_CL37_AN, 0x1000);
4074
4075 if (ext_phy_type ==
4076 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
4077				/* The SNR will improve by about 2dB by changing
4078				   the BW and FFE main tap. The rest of the commands
4079				   are executed after link is up */
4080				/* Change FFE main cursor to 5 in EDC register */
4081 if (bnx2x_8073_is_snr_needed(params))
4082 bnx2x_cl45_write(bp, params->port,
4083 ext_phy_type,
4084 ext_phy_addr,
4085 MDIO_PMA_DEVAD,
4086 MDIO_PMA_REG_EDC_FFE_MAIN,
4087 0xFB0C);
4088
4089				/* Enable FEC (Forward Error Correction)
4090 Request in the AN */
4091 bnx2x_cl45_read(bp, params->port,
4092 ext_phy_type,
4093 ext_phy_addr,
4094 MDIO_AN_DEVAD,
4095 MDIO_AN_REG_ADV2, &tmp1);
4096
4097 tmp1 |= (1<<15);
4098
4099 bnx2x_cl45_write(bp, params->port,
4100 ext_phy_type,
4101 ext_phy_addr,
4102 MDIO_AN_DEVAD,
4103 MDIO_AN_REG_ADV2, tmp1);
4104
4105 }
4106
4107 bnx2x_ext_phy_set_pause(params, vars);
4108
4109 /* Restart autoneg */
4110 msleep(500);
4111 bnx2x_cl45_write(bp, params->port,
4112 ext_phy_type,
4113 ext_phy_addr,
4114 MDIO_AN_DEVAD,
4115 MDIO_AN_REG_CTRL, 0x1200);
4116 DP(NETIF_MSG_LINK, "807x Autoneg Restart: "
4117 "Advertise 1G=%x, 10G=%x\n",
4118 ((val & (1<<5)) > 0),
4119 ((val & (1<<7)) > 0));
4120 break;
4121 }
4122
4123 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4124 {
4125 u16 tmp1;
4126 u16 rx_alarm_ctrl_val;
4127 u16 lasi_ctrl_val;
4128
4129 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
4130
4131 u16 mod_abs;
4132 rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
4133 lasi_ctrl_val = 0x0004;
4134
4135 DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
4136 /* enable LASI */
4137 bnx2x_cl45_write(bp, params->port,
4138 ext_phy_type,
4139 ext_phy_addr,
4140 MDIO_PMA_DEVAD,
4141 MDIO_PMA_REG_RX_ALARM_CTRL,
4142 rx_alarm_ctrl_val);
4143
4144 bnx2x_cl45_write(bp, params->port,
4145 ext_phy_type,
4146 ext_phy_addr,
4147 MDIO_PMA_DEVAD,
4148 MDIO_PMA_REG_LASI_CTRL,
4149 lasi_ctrl_val);
4150
4151 /* Initially configure MOD_ABS to interrupt when
4152		   a module is present (bit 8) */
4153 bnx2x_cl45_read(bp, params->port,
4154 ext_phy_type,
4155 ext_phy_addr,
4156 MDIO_PMA_DEVAD,
4157 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
4158 /* Set EDC off by setting OPTXLOS signal input to low
4159 (bit 9).
4160 When the EDC is off it locks onto a reference clock and
4161 avoids becoming 'lost'.*/
4162 mod_abs &= ~((1<<8) | (1<<9));
4163 bnx2x_cl45_write(bp, params->port,
4164 ext_phy_type,
4165 ext_phy_addr,
4166 MDIO_PMA_DEVAD,
4167 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4168
4169 /* Make MOD_ABS give interrupt on change */
4170 bnx2x_cl45_read(bp, params->port,
4171 ext_phy_type,
4172 ext_phy_addr,
4173 MDIO_PMA_DEVAD,
4174 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4175 &val);
4176 val |= (1<<12);
4177 bnx2x_cl45_write(bp, params->port,
4178 ext_phy_type,
4179 ext_phy_addr,
4180 MDIO_PMA_DEVAD,
4181 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4182 val);
4183
4184 /* Set 8727 GPIOs to input to allow reading from the
4185 8727 GPIO0 status which reflect SFP+ module
4186 over-current */
4187
4188 bnx2x_cl45_read(bp, params->port,
4189 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4190 ext_phy_addr,
4191 MDIO_PMA_DEVAD,
4192 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4193 &val);
4194 val &= 0xff8f; /* Reset bits 4-6 */
4195 bnx2x_cl45_write(bp, params->port,
4196 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4197 ext_phy_addr,
4198 MDIO_PMA_DEVAD,
4199 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4200 val);
4201
4202 bnx2x_8727_power_module(bp, params, ext_phy_addr, 1);
4203 bnx2x_bcm8073_set_xaui_low_power_mode(params);
4204
4205 bnx2x_cl45_read(bp, params->port,
4206 ext_phy_type,
4207 ext_phy_addr,
4208 MDIO_PMA_DEVAD,
4209 MDIO_PMA_REG_M8051_MSGOUT_REG,
4210 &tmp1);
4211
4212 bnx2x_cl45_read(bp, params->port,
4213 ext_phy_type,
4214 ext_phy_addr,
4215 MDIO_PMA_DEVAD,
4216 MDIO_PMA_REG_RX_ALARM, &tmp1);
4217
4218 /* Set option 1G speed */
4219 if (params->req_line_speed == SPEED_1000) {
4220
4221 DP(NETIF_MSG_LINK, "Setting 1G force\n");
4222 bnx2x_cl45_write(bp, params->port,
4223 ext_phy_type,
4224 ext_phy_addr,
4225 MDIO_PMA_DEVAD,
4226 MDIO_PMA_REG_CTRL, 0x40);
4227 bnx2x_cl45_write(bp, params->port,
4228 ext_phy_type,
4229 ext_phy_addr,
4230 MDIO_PMA_DEVAD,
4231 MDIO_PMA_REG_10G_CTRL2, 0xD);
4232 bnx2x_cl45_read(bp, params->port,
4233 ext_phy_type,
4234 ext_phy_addr,
4235 MDIO_PMA_DEVAD,
4236 MDIO_PMA_REG_10G_CTRL2, &tmp1);
4237 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
4238
4239 } else if ((params->req_line_speed ==
4240 SPEED_AUTO_NEG) &&
4241 ((params->speed_cap_mask &
4242 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
4243
4244 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
4245 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4246 ext_phy_addr, MDIO_AN_DEVAD,
4247 MDIO_PMA_REG_8727_MISC_CTRL, 0);
4248 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4249 ext_phy_addr, MDIO_AN_DEVAD,
4250 MDIO_AN_REG_CL37_AN, 0x1300);
4251 } else {
4252				/* Since the 8727 has only a single reset pin,
4253				   we need to set the 10G registers although
4254				   they are the default */
4255 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4256 ext_phy_addr, MDIO_AN_DEVAD,
4257 MDIO_AN_REG_CTRL, 0x0020);
4258 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4259 ext_phy_addr, MDIO_AN_DEVAD,
4260 0x7, 0x0100);
4261 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4262 ext_phy_addr, MDIO_PMA_DEVAD,
4263 MDIO_PMA_REG_CTRL, 0x2040);
4264 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4265 ext_phy_addr, MDIO_PMA_DEVAD,
4266 MDIO_PMA_REG_10G_CTRL2, 0x0008);
4267 }
4268
4269 /* Set 2-wire transfer rate of SFP+ module EEPROM
4270		 * to 100kHz since some DACs (direct attached cables) do
4271		 * not work at 400kHz.
4272 */
4273 bnx2x_cl45_write(bp, params->port,
4274 ext_phy_type,
4275 ext_phy_addr,
4276 MDIO_PMA_DEVAD,
4277 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
4278 0xa001);
4279
4280 /* Set TX PreEmphasis if needed */
4281 if ((params->feature_config_flags &
4282 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
4283			DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, "
4284 "TX_CTRL2 0x%x\n",
4285 params->xgxs_config_tx[0],
4286 params->xgxs_config_tx[1]);
4287 bnx2x_cl45_write(bp, params->port,
4288 ext_phy_type,
4289 ext_phy_addr,
4290 MDIO_PMA_DEVAD,
4291 MDIO_PMA_REG_8727_TX_CTRL1,
4292 params->xgxs_config_tx[0]);
4293
4294 bnx2x_cl45_write(bp, params->port,
4295 ext_phy_type,
4296 ext_phy_addr,
4297 MDIO_PMA_DEVAD,
4298 MDIO_PMA_REG_8727_TX_CTRL2,
4299 params->xgxs_config_tx[1]);
4300 }
4301
4302 break;
4303 }
4304
4305 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4306 {
4307 u16 fw_ver1, fw_ver2;
4308 DP(NETIF_MSG_LINK,
4309 "Setting the SFX7101 LASI indication\n");
4310
4311 bnx2x_cl45_write(bp, params->port,
4312 ext_phy_type,
4313 ext_phy_addr,
4314 MDIO_PMA_DEVAD,
4315 MDIO_PMA_REG_LASI_CTRL, 0x1);
4316 DP(NETIF_MSG_LINK,
4317 "Setting the SFX7101 LED to blink on traffic\n");
4318 bnx2x_cl45_write(bp, params->port,
4319 ext_phy_type,
4320 ext_phy_addr,
4321 MDIO_PMA_DEVAD,
4322 MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
4323
4324 bnx2x_ext_phy_set_pause(params, vars);
4325 /* Restart autoneg */
4326 bnx2x_cl45_read(bp, params->port,
4327 ext_phy_type,
4328 ext_phy_addr,
4329 MDIO_AN_DEVAD,
4330 MDIO_AN_REG_CTRL, &val);
4331 val |= 0x200;
4332 bnx2x_cl45_write(bp, params->port,
4333 ext_phy_type,
4334 ext_phy_addr,
4335 MDIO_AN_DEVAD,
4336 MDIO_AN_REG_CTRL, val);
4337
4338 /* Save spirom version */
4339 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4340 ext_phy_addr, MDIO_PMA_DEVAD,
4341 MDIO_PMA_REG_7101_VER1, &fw_ver1);
4342
4343 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4344 ext_phy_addr, MDIO_PMA_DEVAD,
4345 MDIO_PMA_REG_7101_VER2, &fw_ver2);
4346
4347 bnx2x_save_spirom_version(params->bp, params->port,
4348 params->shmem_base,
4349 (u32)(fw_ver1<<16 | fw_ver2));
4350 break;
4351 }
4352 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
4353 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
4354 /* This phy uses the NIG latch mechanism since link
4355 indication arrives through its LED4 and not via
4356		   its LASI signal, so we get a steady signal
4357		   instead of clear-on-read */
4358 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
4359 1 << NIG_LATCH_BC_ENABLE_MI_INT);
4360
4361 bnx2x_cl45_write(bp, params->port,
4362 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
4363 ext_phy_addr,
4364 MDIO_PMA_DEVAD,
4365 MDIO_PMA_REG_CTRL, 0x0000);
4366
4367 bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr);
4368 if (params->req_line_speed == SPEED_AUTO_NEG) {
4369
4370 u16 autoneg_val, an_1000_val, an_10_100_val;
4371 /* set 1000 speed advertisement */
4372 bnx2x_cl45_read(bp, params->port,
4373 ext_phy_type,
4374 ext_phy_addr,
4375 MDIO_AN_DEVAD,
4376 MDIO_AN_REG_8481_1000T_CTRL,
4377 &an_1000_val);
4378
4379 if (params->speed_cap_mask &
4380 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
4381 an_1000_val |= (1<<8);
4382 if (params->req_duplex == DUPLEX_FULL)
4383 an_1000_val |= (1<<9);
4384 DP(NETIF_MSG_LINK, "Advertising 1G\n");
4385 } else
4386 an_1000_val &= ~((1<<8) | (1<<9));
4387
4388 bnx2x_cl45_write(bp, params->port,
4389 ext_phy_type,
4390 ext_phy_addr,
4391 MDIO_AN_DEVAD,
4392 MDIO_AN_REG_8481_1000T_CTRL,
4393 an_1000_val);
4394
4395 /* set 100 speed advertisement */
4396 bnx2x_cl45_read(bp, params->port,
4397 ext_phy_type,
4398 ext_phy_addr,
4399 MDIO_AN_DEVAD,
4400 MDIO_AN_REG_8481_LEGACY_AN_ADV,
4401 &an_10_100_val);
4402
4403 if (params->speed_cap_mask &
4404 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
4405 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
4406 an_10_100_val |= (1<<7);
4407 if (params->req_duplex == DUPLEX_FULL)
4408 an_10_100_val |= (1<<8);
4409 DP(NETIF_MSG_LINK,
4410 "Advertising 100M\n");
4411 } else
4412 an_10_100_val &= ~((1<<7) | (1<<8));
4413
4414 /* set 10 speed advertisement */
4415 if (params->speed_cap_mask &
4416 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
4417 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
4418 an_10_100_val |= (1<<5);
4419 if (params->req_duplex == DUPLEX_FULL)
4420 an_10_100_val |= (1<<6);
4421 DP(NETIF_MSG_LINK, "Advertising 10M\n");
4422 }
4423 else
4424 an_10_100_val &= ~((1<<5) | (1<<6));
4425
4426 bnx2x_cl45_write(bp, params->port,
4427 ext_phy_type,
4428 ext_phy_addr,
4429 MDIO_AN_DEVAD,
4430 MDIO_AN_REG_8481_LEGACY_AN_ADV,
4431 an_10_100_val);
4432
4433 bnx2x_cl45_read(bp, params->port,
4434 ext_phy_type,
4435 ext_phy_addr,
4436 MDIO_AN_DEVAD,
4437 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4438 &autoneg_val);
4439
4440 /* Disable forced speed */
4441 autoneg_val &= ~(1<<6|1<<13);
4442
4443 /* Enable autoneg and restart autoneg
4444 for legacy speeds */
4445 autoneg_val |= (1<<9|1<<12);
4446
4447 if (params->req_duplex == DUPLEX_FULL)
4448 autoneg_val |= (1<<8);
4449 else
4450 autoneg_val &= ~(1<<8);
4451
4452 bnx2x_cl45_write(bp, params->port,
4453 ext_phy_type,
4454 ext_phy_addr,
4455 MDIO_AN_DEVAD,
4456 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4457 autoneg_val);
4458
4459 if (params->speed_cap_mask &
4460 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
4461 DP(NETIF_MSG_LINK, "Advertising 10G\n");
4462 /* Restart autoneg for 10G*/
4463
4464 bnx2x_cl45_write(bp, params->port,
4465 ext_phy_type,
4466 ext_phy_addr,
4467 MDIO_AN_DEVAD,
4468 MDIO_AN_REG_CTRL, 0x3200);
4469 }
4470 } else {
4471 /* Force speed */
4472 u16 autoneg_ctrl, pma_ctrl;
4473 bnx2x_cl45_read(bp, params->port,
4474 ext_phy_type,
4475 ext_phy_addr,
4476 MDIO_AN_DEVAD,
4477 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4478 &autoneg_ctrl);
4479
4480 /* Disable autoneg */
4481 autoneg_ctrl &= ~(1<<12);
4482
4483 /* Set 1000 force */
4484 switch (params->req_line_speed) {
4485 case SPEED_10000:
4486 DP(NETIF_MSG_LINK,
4487 "Unable to set 10G force !\n");
4488 break;
4489 case SPEED_1000:
4490 bnx2x_cl45_read(bp, params->port,
4491 ext_phy_type,
4492 ext_phy_addr,
4493 MDIO_PMA_DEVAD,
4494 MDIO_PMA_REG_CTRL,
4495 &pma_ctrl);
4496 autoneg_ctrl &= ~(1<<13);
4497 autoneg_ctrl |= (1<<6);
4498 pma_ctrl &= ~(1<<13);
4499 pma_ctrl |= (1<<6);
4500 DP(NETIF_MSG_LINK,
4501 "Setting 1000M force\n");
4502 bnx2x_cl45_write(bp, params->port,
4503 ext_phy_type,
4504 ext_phy_addr,
4505 MDIO_PMA_DEVAD,
4506 MDIO_PMA_REG_CTRL,
4507 pma_ctrl);
4508 break;
4509 case SPEED_100:
4510 autoneg_ctrl |= (1<<13);
4511 autoneg_ctrl &= ~(1<<6);
4512 DP(NETIF_MSG_LINK,
4513 "Setting 100M force\n");
4514 break;
4515 case SPEED_10:
4516 autoneg_ctrl &= ~(1<<13);
4517 autoneg_ctrl &= ~(1<<6);
4518 DP(NETIF_MSG_LINK,
4519 "Setting 10M force\n");
4520 break;
4521 }
4522
4523 /* Duplex mode */
4524 if (params->req_duplex == DUPLEX_FULL) {
4525 autoneg_ctrl |= (1<<8);
4526 DP(NETIF_MSG_LINK,
4527 "Setting full duplex\n");
4528 } else
4529 autoneg_ctrl &= ~(1<<8);
4530
4531 /* Update autoneg ctrl and pma ctrl */
4532 bnx2x_cl45_write(bp, params->port,
4533 ext_phy_type,
4534 ext_phy_addr,
4535 MDIO_AN_DEVAD,
4536 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4537 autoneg_ctrl);
4538 }
4539
4540 /* Save spirom version */
4541 bnx2x_save_8481_spirom_version(bp, params->port,
4542 ext_phy_addr,
4543 params->shmem_base);
4544 break;
4545 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
4546 DP(NETIF_MSG_LINK,
4547 "XGXS PHY Failure detected 0x%x\n",
4548 params->ext_phy_config);
4549 rc = -EINVAL;
4550 break;
4551 default:
4552 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
4553 params->ext_phy_config);
4554 rc = -EINVAL;
4555 break;
4556 }
4557
4558 } else { /* SerDes */
4559
4560 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
4561 switch (ext_phy_type) {
4562 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
4563 DP(NETIF_MSG_LINK, "SerDes Direct\n");
4564 break;
4565
4566 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
4567 DP(NETIF_MSG_LINK, "SerDes 5482\n");
4568 break;
4569
4570 default:
4571 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
4572 params->ext_phy_config);
4573 break;
4574 }
4575 }
4576 return rc;
4577}
4578
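/* Handle a BCM8727 MOD_ABS (SFP+ module absent/present) event:
 * reprogram the MOD_ABS/OPRXLOS bits for the next transition, clear the
 * latched RX alarm, and on module insertion optionally disable the
 * transmitter and re-run SFP+ module detection.
 */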
4579static void bnx2x_8727_handle_mod_abs(struct link_params *params)
4580{
4581 struct bnx2x *bp = params->bp;
4582 u16 mod_abs, rx_alarm_status;
4583 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
4584 u32 val = REG_RD(bp, params->shmem_base +
4585 offsetof(struct shmem_region, dev_info.
4586 port_feature_config[params->port].
4587 config));
4588 bnx2x_cl45_read(bp, params->port,
4589 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4590 ext_phy_addr,
4591 MDIO_PMA_DEVAD,
4592 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
4593 if (mod_abs & (1<<8)) {
4594
4595 /* Module is absent */
4596 DP(NETIF_MSG_LINK, "MOD_ABS indication "
4597			 "shows module is absent\n");
4598
4599 /* 1. Set mod_abs to detect next module
4600 presence event
4601 2. Set EDC off by setting OPTXLOS signal input to low
4602 (bit 9).
4603 When the EDC is off it locks onto a reference clock and
4604 avoids becoming 'lost'.*/
4605 mod_abs &= ~((1<<8)|(1<<9));
4606 bnx2x_cl45_write(bp, params->port,
4607 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4608 ext_phy_addr,
4609 MDIO_PMA_DEVAD,
4610 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4611
4612 /* Clear RX alarm since it stays up as long as
4613 the mod_abs wasn't changed */
4614 bnx2x_cl45_read(bp, params->port,
4615 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4616 ext_phy_addr,
4617 MDIO_PMA_DEVAD,
4618 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4619
4620 } else {
4621 /* Module is present */
4622 DP(NETIF_MSG_LINK, "MOD_ABS indication "
4623			 "shows module is present\n");
4624		/* First, disable the transmitter; if the
4625		   module is ok, the module_detection will
4626		   enable it */
4627
4628 /* 1. Set mod_abs to detect next module
4629		   absent event (bit 8)
4630 2. Restore the default polarity of the OPRXLOS signal and
4631 this signal will then correctly indicate the presence or
4632 absence of the Rx signal. (bit 9) */
4633 mod_abs |= ((1<<8)|(1<<9));
4634 bnx2x_cl45_write(bp, params->port,
4635 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4636 ext_phy_addr,
4637 MDIO_PMA_DEVAD,
4638 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4639
4640 /* Clear RX alarm since it stays up as long as
4641		   the mod_abs wasn't changed. This needs to be done
4642 before calling the module detection, otherwise it will clear
4643 the link update alarm */
4644 bnx2x_cl45_read(bp, params->port,
4645 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4646 ext_phy_addr,
4647 MDIO_PMA_DEVAD,
4648 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4649
4650
4651 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
4652 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
4653 bnx2x_sfp_set_transmitter(bp, params->port,
4654 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4655 ext_phy_addr, 0);
4656
4657 if (bnx2x_wait_for_sfp_module_initialized(params)
4658 == 0)
4659 bnx2x_sfp_module_detection(params);
4660 else
4661 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
4662 }
4663
4664 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
4665 rx_alarm_status);
4666 /* No need to check link status in case of
4667 module plugged in/out */
4668}
4669
4670
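/* Poll the external PHY (selected by ext_phy_config) for link status.
 * Updates vars->line_speed (and duplex for the 848x legacy speeds) and
 * returns 1 if the external PHY reports link up, 0 otherwise.
 */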
4671static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
4672 struct link_vars *vars,
4673 u8 is_mi_int)
4674{
4675 struct bnx2x *bp = params->bp;
4676 u32 ext_phy_type;
4677 u8 ext_phy_addr;
4678 u16 val1 = 0, val2;
4679 u16 rx_sd, pcs_status;
4680 u8 ext_phy_link_up = 0;
4681 u8 port = params->port;
4682
4683 if (vars->phy_flags & PHY_XGXS_FLAG) {
4684 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
4685 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
4686 switch (ext_phy_type) {
4687 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
4688 DP(NETIF_MSG_LINK, "XGXS Direct\n");
4689 ext_phy_link_up = 1;
4690 break;
4691
4692 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
4693 DP(NETIF_MSG_LINK, "XGXS 8705\n");
4694 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4695 ext_phy_addr,
4696 MDIO_WIS_DEVAD,
4697 MDIO_WIS_REG_LASI_STATUS, &val1);
4698 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
4699
4700 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4701 ext_phy_addr,
4702 MDIO_WIS_DEVAD,
4703 MDIO_WIS_REG_LASI_STATUS, &val1);
4704 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
4705
4706 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4707 ext_phy_addr,
4708 MDIO_PMA_DEVAD,
4709 MDIO_PMA_REG_RX_SD, &rx_sd);
4710
4711 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4712 ext_phy_addr,
4713 1,
4714 0xc809, &val1);
4715 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4716 ext_phy_addr,
4717 1,
4718 0xc809, &val1);
4719
4720 DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
4721 ext_phy_link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) &&
4722 ((val1 & (1<<8)) == 0));
4723 if (ext_phy_link_up)
4724 vars->line_speed = SPEED_10000;
4725 break;
4726
4727 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
4728 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4729 DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
4730 /* Clear RX Alarm*/
4731 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4732 ext_phy_addr,
4733 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
4734 &val2);
4735 /* clear LASI indication*/
4736 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4737 ext_phy_addr,
4738 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
4739 &val1);
4740 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4741 ext_phy_addr,
4742 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
4743 &val2);
4744 DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x-->"
4745 "0x%x\n", val1, val2);
4746
4747 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4748 ext_phy_addr,
4749 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD,
4750 &rx_sd);
4751 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4752 ext_phy_addr,
4753 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS,
4754 &pcs_status);
4755 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4756 ext_phy_addr,
4757 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
4758 &val2);
4759 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4760 ext_phy_addr,
4761 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
4762 &val2);
4763
4764 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x"
4765 " pcs_status 0x%x 1Gbps link_status 0x%x\n",
4766 rx_sd, pcs_status, val2);
4767 /* link is up if both bit 0 of pmd_rx_sd and
4768 * bit 0 of pcs_status are set, or if the autoneg bit
4769		 * 1 is set
4770 */
4771 ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
4772 (val2 & (1<<1)));
4773 if (ext_phy_link_up) {
4774 if (ext_phy_type ==
4775 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
4776 /* If transmitter is disabled,
4777 ignore false link up indication */
4778 bnx2x_cl45_read(bp, params->port,
4779 ext_phy_type,
4780 ext_phy_addr,
4781 MDIO_PMA_DEVAD,
4782 MDIO_PMA_REG_PHY_IDENTIFIER,
4783 &val1);
4784 if (val1 & (1<<15)) {
4785 DP(NETIF_MSG_LINK, "Tx is "
4786 "disabled\n");
4787 ext_phy_link_up = 0;
4788 break;
4789 }
4790 }
4791 if (val2 & (1<<1))
4792 vars->line_speed = SPEED_1000;
4793 else
4794 vars->line_speed = SPEED_10000;
4795 }
4796 break;
4797
4798 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4799 {
4800 u16 link_status = 0;
4801 u16 rx_alarm_status;
4802 /* Check the LASI */
4803 bnx2x_cl45_read(bp, params->port,
4804 ext_phy_type,
4805 ext_phy_addr,
4806 MDIO_PMA_DEVAD,
4807 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4808
4809 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
4810 rx_alarm_status);
4811
4812 bnx2x_cl45_read(bp, params->port,
4813 ext_phy_type,
4814 ext_phy_addr,
4815 MDIO_PMA_DEVAD,
4816 MDIO_PMA_REG_LASI_STATUS, &val1);
4817
4818 DP(NETIF_MSG_LINK,
4819 "8727 LASI status 0x%x\n",
4820 val1);
4821
4822 /* Clear MSG-OUT */
4823 bnx2x_cl45_read(bp, params->port,
4824 ext_phy_type,
4825 ext_phy_addr,
4826 MDIO_PMA_DEVAD,
4827 MDIO_PMA_REG_M8051_MSGOUT_REG,
4828 &val1);
4829
4830 /*
4831			 * If a module is present and there is a need to check
4832			 * for over-current
4833 */
4834 if (!(params->feature_config_flags &
4835 FEATURE_CONFIG_BCM8727_NOC) &&
4836 !(rx_alarm_status & (1<<5))) {
4837 /* Check over-current using 8727 GPIO0 input*/
4838 bnx2x_cl45_read(bp, params->port,
4839 ext_phy_type,
4840 ext_phy_addr,
4841 MDIO_PMA_DEVAD,
4842 MDIO_PMA_REG_8727_GPIO_CTRL,
4843 &val1);
4844
4845 if ((val1 & (1<<8)) == 0) {
4846 DP(NETIF_MSG_LINK, "8727 Power fault"
4847 " has been detected on "
4848 "port %d\n",
4849 params->port);
4850 netdev_err(bp->dev, "Error: Power fault on Port %d has been detected and the power to that SFP+ module has been removed to prevent failure of the card. Please remove the SFP+ module and restart the system to clear this error.\n",
4851 params->port);
4852 /*
4853 * Disable all RX_ALARMs except for
4854 * mod_abs
4855 */
4856 bnx2x_cl45_write(bp, params->port,
4857 ext_phy_type,
4858 ext_phy_addr,
4859 MDIO_PMA_DEVAD,
4860 MDIO_PMA_REG_RX_ALARM_CTRL,
4861 (1<<5));
4862
4863 bnx2x_cl45_read(bp, params->port,
4864 ext_phy_type,
4865 ext_phy_addr,
4866 MDIO_PMA_DEVAD,
4867 MDIO_PMA_REG_PHY_IDENTIFIER,
4868 &val1);
4869 /* Wait for module_absent_event */
4870 val1 |= (1<<8);
4871 bnx2x_cl45_write(bp, params->port,
4872 ext_phy_type,
4873 ext_phy_addr,
4874 MDIO_PMA_DEVAD,
4875 MDIO_PMA_REG_PHY_IDENTIFIER,
4876 val1);
4877 /* Clear RX alarm */
4878 bnx2x_cl45_read(bp, params->port,
4879 ext_phy_type,
4880 ext_phy_addr,
4881 MDIO_PMA_DEVAD,
4882 MDIO_PMA_REG_RX_ALARM,
4883 &rx_alarm_status);
4884 break;
4885 }
4886 } /* Over current check */
4887
4888 /* When module absent bit is set, check module */
4889 if (rx_alarm_status & (1<<5)) {
4890 bnx2x_8727_handle_mod_abs(params);
4891 /* Enable all mod_abs and link detection bits */
4892 bnx2x_cl45_write(bp, params->port,
4893 ext_phy_type,
4894 ext_phy_addr,
4895 MDIO_PMA_DEVAD,
4896 MDIO_PMA_REG_RX_ALARM_CTRL,
4897 ((1<<5) | (1<<2)));
4898 }
4899
4900 /* If transmitter is disabled,
4901 ignore false link up indication */
4902 bnx2x_cl45_read(bp, params->port,
4903 ext_phy_type,
4904 ext_phy_addr,
4905 MDIO_PMA_DEVAD,
4906 MDIO_PMA_REG_PHY_IDENTIFIER,
4907 &val1);
4908 if (val1 & (1<<15)) {
4909 DP(NETIF_MSG_LINK, "Tx is disabled\n");
4910 ext_phy_link_up = 0;
4911 break;
4912 }
4913
4914 bnx2x_cl45_read(bp, params->port,
4915 ext_phy_type,
4916 ext_phy_addr,
4917 MDIO_PMA_DEVAD,
4918 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
4919 &link_status);
4920
4921 /* Bits 0..2 --> speed detected,
4922 bits 13..15--> link is down */
4923 if ((link_status & (1<<2)) &&
4924 (!(link_status & (1<<15)))) {
4925 ext_phy_link_up = 1;
4926 vars->line_speed = SPEED_10000;
4927 } else if ((link_status & (1<<0)) &&
4928 (!(link_status & (1<<13)))) {
4929 ext_phy_link_up = 1;
4930 vars->line_speed = SPEED_1000;
4931 DP(NETIF_MSG_LINK,
4932 "port %x: External link"
4933 " up in 1G\n", params->port);
4934 } else {
4935 ext_phy_link_up = 0;
4936 DP(NETIF_MSG_LINK,
4937 "port %x: External link"
4938 " is down\n", params->port);
4939 }
4940 break;
4941 }
4942
4943 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4944 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4945 {
4946 u16 link_status = 0;
4947 u16 an1000_status = 0;
4948
4949 if (ext_phy_type ==
4950 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
4951 bnx2x_cl45_read(bp, params->port,
4952 ext_phy_type,
4953 ext_phy_addr,
4954 MDIO_PCS_DEVAD,
4955 MDIO_PCS_REG_LASI_STATUS, &val1);
4956 bnx2x_cl45_read(bp, params->port,
4957 ext_phy_type,
4958 ext_phy_addr,
4959 MDIO_PCS_DEVAD,
4960 MDIO_PCS_REG_LASI_STATUS, &val2);
4961 DP(NETIF_MSG_LINK,
4962 "870x LASI status 0x%x->0x%x\n",
4963 val1, val2);
4964 } else {
4965 /* In 8073, port1 is directed through emac0 and
4966 * port0 is directed through emac1
4967 */
4968 bnx2x_cl45_read(bp, params->port,
4969 ext_phy_type,
4970 ext_phy_addr,
4971 MDIO_PMA_DEVAD,
4972 MDIO_PMA_REG_LASI_STATUS, &val1);
4973
4974 DP(NETIF_MSG_LINK,
4975					   "8073 LASI status 0x%x\n",
4976 val1);
4977 }
4978
4979 /* clear the interrupt LASI status register */
4980 bnx2x_cl45_read(bp, params->port,
4981 ext_phy_type,
4982 ext_phy_addr,
4983 MDIO_PCS_DEVAD,
4984 MDIO_PCS_REG_STATUS, &val2);
4985 bnx2x_cl45_read(bp, params->port,
4986 ext_phy_type,
4987 ext_phy_addr,
4988 MDIO_PCS_DEVAD,
4989 MDIO_PCS_REG_STATUS, &val1);
4990 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n",
4991 val2, val1);
4992 /* Clear MSG-OUT */
4993 bnx2x_cl45_read(bp, params->port,
4994 ext_phy_type,
4995 ext_phy_addr,
4996 MDIO_PMA_DEVAD,
4997 MDIO_PMA_REG_M8051_MSGOUT_REG,
4998 &val1);
4999
5000 /* Check the LASI */
5001 bnx2x_cl45_read(bp, params->port,
5002 ext_phy_type,
5003 ext_phy_addr,
5004 MDIO_PMA_DEVAD,
5005 MDIO_PMA_REG_RX_ALARM, &val2);
5006
5007 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
5008
5009 /* Check the link status */
5010 bnx2x_cl45_read(bp, params->port,
5011 ext_phy_type,
5012 ext_phy_addr,
5013 MDIO_PCS_DEVAD,
5014 MDIO_PCS_REG_STATUS, &val2);
5015 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
5016
5017 bnx2x_cl45_read(bp, params->port,
5018 ext_phy_type,
5019 ext_phy_addr,
5020 MDIO_PMA_DEVAD,
5021 MDIO_PMA_REG_STATUS, &val2);
5022 bnx2x_cl45_read(bp, params->port,
5023 ext_phy_type,
5024 ext_phy_addr,
5025 MDIO_PMA_DEVAD,
5026 MDIO_PMA_REG_STATUS, &val1);
5027 ext_phy_link_up = ((val1 & 4) == 4);
5028 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
5029 if (ext_phy_type ==
5030 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
5031
5032 if (ext_phy_link_up &&
5033 ((params->req_line_speed !=
5034 SPEED_10000))) {
5035 if (bnx2x_bcm8073_xaui_wa(params)
5036 != 0) {
5037 ext_phy_link_up = 0;
5038 break;
5039 }
5040 }
5041 bnx2x_cl45_read(bp, params->port,
5042 ext_phy_type,
5043 ext_phy_addr,
5044 MDIO_AN_DEVAD,
5045 MDIO_AN_REG_LINK_STATUS,
5046 &an1000_status);
5047 bnx2x_cl45_read(bp, params->port,
5048 ext_phy_type,
5049 ext_phy_addr,
5050 MDIO_AN_DEVAD,
5051 MDIO_AN_REG_LINK_STATUS,
5052 &an1000_status);
5053
5054 /* Check the link status on 1.1.2 */
5055 bnx2x_cl45_read(bp, params->port,
5056 ext_phy_type,
5057 ext_phy_addr,
5058 MDIO_PMA_DEVAD,
5059 MDIO_PMA_REG_STATUS, &val2);
5060 bnx2x_cl45_read(bp, params->port,
5061 ext_phy_type,
5062 ext_phy_addr,
5063 MDIO_PMA_DEVAD,
5064 MDIO_PMA_REG_STATUS, &val1);
5065 DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
5066 "an_link_status=0x%x\n",
5067 val2, val1, an1000_status);
5068
5069 ext_phy_link_up = (((val1 & 4) == 4) ||
5070 (an1000_status & (1<<1)));
5071 if (ext_phy_link_up &&
5072 bnx2x_8073_is_snr_needed(params)) {
5073					/* The SNR will improve by about 2dB by
5074					   changing the BW and FFE main tap. */
5075
5076					/* The 1st write to change the FFE main
5077					   tap is set before restarting AN */
5078 /* Change PLL Bandwidth in EDC
5079 register */
5080 bnx2x_cl45_write(bp, port, ext_phy_type,
5081 ext_phy_addr,
5082 MDIO_PMA_DEVAD,
5083 MDIO_PMA_REG_PLL_BANDWIDTH,
5084 0x26BC);
5085
5086 /* Change CDR Bandwidth in EDC
5087 register */
5088 bnx2x_cl45_write(bp, port, ext_phy_type,
5089 ext_phy_addr,
5090 MDIO_PMA_DEVAD,
5091 MDIO_PMA_REG_CDR_BANDWIDTH,
5092 0x0333);
5093 }
5094 bnx2x_cl45_read(bp, params->port,
5095 ext_phy_type,
5096 ext_phy_addr,
5097 MDIO_PMA_DEVAD,
5098 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
5099 &link_status);
5100
5101 /* Bits 0..2 --> speed detected,
5102 bits 13..15--> link is down */
5103 if ((link_status & (1<<2)) &&
5104 (!(link_status & (1<<15)))) {
5105 ext_phy_link_up = 1;
5106 vars->line_speed = SPEED_10000;
5107 DP(NETIF_MSG_LINK,
5108 "port %x: External link"
5109 " up in 10G\n", params->port);
5110 } else if ((link_status & (1<<1)) &&
5111 (!(link_status & (1<<14)))) {
5112 ext_phy_link_up = 1;
5113 vars->line_speed = SPEED_2500;
5114 DP(NETIF_MSG_LINK,
5115 "port %x: External link"
5116 " up in 2.5G\n", params->port);
5117 } else if ((link_status & (1<<0)) &&
5118 (!(link_status & (1<<13)))) {
5119 ext_phy_link_up = 1;
5120 vars->line_speed = SPEED_1000;
5121 DP(NETIF_MSG_LINK,
5122 "port %x: External link"
5123 " up in 1G\n", params->port);
5124 } else {
5125 ext_phy_link_up = 0;
5126 DP(NETIF_MSG_LINK,
5127 "port %x: External link"
5128 " is down\n", params->port);
5129 }
5130 } else {
5131 /* See if 1G link is up for the 8072 */
5132 bnx2x_cl45_read(bp, params->port,
5133 ext_phy_type,
5134 ext_phy_addr,
5135 MDIO_AN_DEVAD,
5136 MDIO_AN_REG_LINK_STATUS,
5137 &an1000_status);
5138 bnx2x_cl45_read(bp, params->port,
5139 ext_phy_type,
5140 ext_phy_addr,
5141 MDIO_AN_DEVAD,
5142 MDIO_AN_REG_LINK_STATUS,
5143 &an1000_status);
5144 if (an1000_status & (1<<1)) {
5145 ext_phy_link_up = 1;
5146 vars->line_speed = SPEED_1000;
5147 DP(NETIF_MSG_LINK,
5148 "port %x: External link"
5149 " up in 1G\n", params->port);
5150 } else if (ext_phy_link_up) {
5151 ext_phy_link_up = 1;
5152 vars->line_speed = SPEED_10000;
5153 DP(NETIF_MSG_LINK,
5154 "port %x: External link"
5155 " up in 10G\n", params->port);
5156 }
5157 }
5158
5159
5160 break;
5161 }
5162 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5163 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5164 ext_phy_addr,
5165 MDIO_PMA_DEVAD,
5166 MDIO_PMA_REG_LASI_STATUS, &val2);
5167 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5168 ext_phy_addr,
5169 MDIO_PMA_DEVAD,
5170 MDIO_PMA_REG_LASI_STATUS, &val1);
5171 DP(NETIF_MSG_LINK,
5172 "10G-base-T LASI status 0x%x->0x%x\n",
5173 val2, val1);
5174 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5175 ext_phy_addr,
5176 MDIO_PMA_DEVAD,
5177 MDIO_PMA_REG_STATUS, &val2);
5178 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5179 ext_phy_addr,
5180 MDIO_PMA_DEVAD,
5181 MDIO_PMA_REG_STATUS, &val1);
5182 DP(NETIF_MSG_LINK,
5183 "10G-base-T PMA status 0x%x->0x%x\n",
5184 val2, val1);
5185 ext_phy_link_up = ((val1 & 4) == 4);
5186 /* if link is up
5187 * print the AN outcome of the SFX7101 PHY
5188 */
5189 if (ext_phy_link_up) {
5190 bnx2x_cl45_read(bp, params->port,
5191 ext_phy_type,
5192 ext_phy_addr,
5193 MDIO_AN_DEVAD,
5194 MDIO_AN_REG_MASTER_STATUS,
5195 &val2);
5196 vars->line_speed = SPEED_10000;
5197 DP(NETIF_MSG_LINK,
5198 "SFX7101 AN status 0x%x->Master=%x\n",
5199 val2,
5200 (val2 & (1<<14)));
5201 }
5202 break;
5203 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5204 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5205 /* Check 10G-BaseT link status */
5206 /* Check PMD signal ok */
5207 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5208 ext_phy_addr,
5209 MDIO_AN_DEVAD,
5210 0xFFFA,
5211 &val1);
5212 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5213 ext_phy_addr,
5214 MDIO_PMA_DEVAD,
5215 MDIO_PMA_REG_8481_PMD_SIGNAL,
5216 &val2);
5217 DP(NETIF_MSG_LINK, "PMD_SIGNAL 1.a811 = 0x%x\n", val2);
5218
5219 /* Check link 10G */
5220 if (val2 & (1<<11)) {
5221 vars->line_speed = SPEED_10000;
5222 ext_phy_link_up = 1;
5223 bnx2x_8481_set_10G_led_mode(params,
5224 ext_phy_type,
5225 ext_phy_addr);
5226 } else { /* Check Legacy speed link */
5227 u16 legacy_status, legacy_speed;
5228
5229 /* Enable expansion register 0x42
5230 (Operation mode status) */
5231 bnx2x_cl45_write(bp, params->port,
5232 ext_phy_type,
5233 ext_phy_addr,
5234 MDIO_AN_DEVAD,
5235 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS,
5236 0xf42);
5237
5238 /* Get legacy speed operation status */
5239 bnx2x_cl45_read(bp, params->port,
5240 ext_phy_type,
5241 ext_phy_addr,
5242 MDIO_AN_DEVAD,
5243 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
5244 &legacy_status);
5245
5246 DP(NETIF_MSG_LINK, "Legacy speed status"
5247 " = 0x%x\n", legacy_status);
5248 ext_phy_link_up = ((legacy_status & (1<<11))
5249 == (1<<11));
5250 if (ext_phy_link_up) {
5251 legacy_speed = (legacy_status & (3<<9));
5252 if (legacy_speed == (0<<9))
5253 vars->line_speed = SPEED_10;
5254 else if (legacy_speed == (1<<9))
5255 vars->line_speed =
5256 SPEED_100;
5257 else if (legacy_speed == (2<<9))
5258 vars->line_speed =
5259 SPEED_1000;
5260 else /* Should not happen */
5261 vars->line_speed = 0;
5262
5263 if (legacy_status & (1<<8))
5264 vars->duplex = DUPLEX_FULL;
5265 else
5266 vars->duplex = DUPLEX_HALF;
5267
5268 DP(NETIF_MSG_LINK, "Link is up "
5269 "in %dMbps, is_duplex_full"
5270						   " = %d\n",
5271 vars->line_speed,
5272 (vars->duplex == DUPLEX_FULL));
5273 bnx2x_8481_set_legacy_led_mode(params,
5274 ext_phy_type,
5275 ext_phy_addr);
5276 }
5277 }
5278 break;
5279 default:
5280 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
5281 params->ext_phy_config);
5282 ext_phy_link_up = 0;
5283 break;
5284 }
5285 /* Set SGMII mode for external phy */
5286 if (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
5287 if (vars->line_speed < SPEED_1000)
5288 vars->phy_flags |= PHY_SGMII_FLAG;
5289 else
5290 vars->phy_flags &= ~PHY_SGMII_FLAG;
5291 }
5292
5293 } else { /* SerDes */
5294 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
5295 switch (ext_phy_type) {
5296 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
5297 DP(NETIF_MSG_LINK, "SerDes Direct\n");
5298 ext_phy_link_up = 1;
5299 break;
5300
5301 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
5302 DP(NETIF_MSG_LINK, "SerDes 5482\n");
5303 ext_phy_link_up = 1;
5304 break;
5305
5306 default:
5307 DP(NETIF_MSG_LINK,
5308 "BAD SerDes ext_phy_config 0x%x\n",
5309 params->ext_phy_config);
5310 ext_phy_link_up = 0;
5311 break;
5312 }
5313 }
5314
5315 return ext_phy_link_up;
5316}
5317
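/* Enable the NIG link-change interrupts for this port: XGXS link10G and
 * link-status bits (or the SerDes link-status bit), plus MI_INT when an
 * external PHY is connected.
 */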
5318static void bnx2x_link_int_enable(struct link_params *params)
5319{
5320 u8 port = params->port;
5321 u32 ext_phy_type;
5322 u32 mask;
5323 struct bnx2x *bp = params->bp;
5324
5325 /* setting the status to report on link up
5326 for either XGXS or SerDes */
5327
5328 if (params->switch_cfg == SWITCH_CFG_10G) {
5329 mask = (NIG_MASK_XGXS0_LINK10G |
5330 NIG_MASK_XGXS0_LINK_STATUS);
5331 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
5332 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5333 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
5334 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
5335 (ext_phy_type !=
5336 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
5337 mask |= NIG_MASK_MI_INT;
5338 DP(NETIF_MSG_LINK, "enabled external phy int\n");
5339 }
5340
5341 } else { /* SerDes */
5342 mask = NIG_MASK_SERDES0_LINK_STATUS;
5343 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
5344 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
5345 if ((ext_phy_type !=
5346 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
5347 (ext_phy_type !=
5348 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
5349 mask |= NIG_MASK_MI_INT;
5350 DP(NETIF_MSG_LINK, "enabled external phy int\n");
5351 }
5352 }
5353 bnx2x_bits_en(bp,
5354 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
5355 mask);
5356
5357 DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
5358 (params->switch_cfg == SWITCH_CFG_10G),
5359 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
5360 DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
5361 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
5362 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
5363 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
5364 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
5365 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
5366 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
5367}
5368
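/* Acknowledge the latched MI interrupt used by the 8481/84823 PHYs and
 * re-arm the NIG latch so the next link change is reported.
 */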
5369static void bnx2x_8481_rearm_latch_signal(struct bnx2x *bp, u8 port,
5370 u8 is_mi_int)
5371{
5372 u32 latch_status = 0, is_mi_int_status;
5373	/* Disable the MI INT (external phy int)
5374	 * by writing 1 to the status register. Link down indication
5375	 * is a high-active signal, so in this case we need to write the
5376 * status to clear the XOR
5377 */
5378 /* Read Latched signals */
5379 latch_status = REG_RD(bp,
5380 NIG_REG_LATCH_STATUS_0 + port*8);
5381 is_mi_int_status = REG_RD(bp,
5382 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4);
5383 DP(NETIF_MSG_LINK, "original_signal = 0x%x, nig_status = 0x%x,"
5384 "latch_status = 0x%x\n",
5385 is_mi_int, is_mi_int_status, latch_status);
5386 /* Handle only those with latched-signal=up.*/
5387 if (latch_status & 1) {
5388		/* For all latched-signal=up, write original_signal to status */
5389 if (is_mi_int)
5390 bnx2x_bits_en(bp,
5391 NIG_REG_STATUS_INTERRUPT_PORT0
5392 + port*4,
5393 NIG_STATUS_EMAC0_MI_INT);
5394 else
5395 bnx2x_bits_dis(bp,
5396 NIG_REG_STATUS_INTERRUPT_PORT0
5397 + port*4,
5398 NIG_STATUS_EMAC0_MI_INT);
5399 /* For all latched-signal=up : Re-Arm Latch signals */
5400 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
5401 (latch_status & 0xfffe) | (latch_status & 1));
5402 }
5403}
5404/*
5405 * link management
5406 */
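/* Acknowledge the NIG link-status interrupt for this port: clear all the
 * latched link-status bits, re-arm the 848x latch if needed, and then
 * mask the source that is currently up (10G XGXS, XGXS lane or SerDes)
 * by writing 1 to its status bit.
 */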
5407static void bnx2x_link_int_ack(struct link_params *params,
5408 struct link_vars *vars, u8 is_10g,
5409 u8 is_mi_int)
5410{
5411 struct bnx2x *bp = params->bp;
5412 u8 port = params->port;
5413
5414 /* first reset all status
5415	 * we assume only one line will change at a time */
5416 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5417 (NIG_STATUS_XGXS0_LINK10G |
5418 NIG_STATUS_XGXS0_LINK_STATUS |
5419 NIG_STATUS_SERDES0_LINK_STATUS));
5420 if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5421 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) ||
5422 (XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5423 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823)) {
5424 bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int);
5425 }
5426 if (vars->phy_link_up) {
5427 if (is_10g) {
5428 /* Disable the 10G link interrupt
5429 * by writing 1 to the status register
5430 */
5431 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
5432 bnx2x_bits_en(bp,
5433 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5434 NIG_STATUS_XGXS0_LINK10G);
5435
5436 } else if (params->switch_cfg == SWITCH_CFG_10G) {
5437 /* Disable the link interrupt
5438 * by writing 1 to the relevant lane
5439 * in the status register
5440 */
5441 u32 ser_lane = ((params->lane_config &
5442 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
5443 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
5444
5445 DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n",
5446 vars->line_speed);
5447 bnx2x_bits_en(bp,
5448 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5449 ((1 << ser_lane) <<
5450 NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
5451
5452 } else { /* SerDes */
5453 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
5454 /* Disable the link interrupt
5455 * by writing 1 to the status register
5456 */
5457 bnx2x_bits_en(bp,
5458 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5459 NIG_STATUS_SERDES0_LINK_STATUS);
5460 }
5461
5462 } else { /* link_down */
5463 }
5464}
5465
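/* Format a 32-bit SPIROM version as an 8-digit hex string with a ':'
 * between the two 16-bit halves (e.g. 0x01020304 -> "0102:0304");
 * requires at least 10 bytes in the destination buffer.
 */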
5466static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len)
5467{
5468 u8 *str_ptr = str;
5469 u32 mask = 0xf0000000;
5470 u8 shift = 8*4;
5471 u8 digit;
5472 if (len < 10) {
5473		/* Need more than 10 chars for this format */
5474 *str_ptr = '\0';
5475 return -EINVAL;
5476 }
5477 while (shift > 0) {
5478
5479 shift -= 4;
5480 digit = ((num & mask) >> shift);
5481 if (digit < 0xa)
5482 *str_ptr = digit + '0';
5483 else
5484 *str_ptr = digit - 0xa + 'a';
5485 str_ptr++;
5486 mask = mask >> 4;
5487 if (shift == 4*4) {
5488 *str_ptr = ':';
5489 str_ptr++;
5490 }
5491 }
5492 *str_ptr = '\0';
5493 return 0;
5494}
5495
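/* Read the external PHY firmware version stored in shmem and format it
 * into 'version' according to the PHY type: raw bytes for the SFX7101,
 * a hex string for the 87xx/848x family, and an empty string for PHYs
 * without firmware.
 */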
5496u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
5497 u8 *version, u16 len)
5498{
5499 struct bnx2x *bp;
5500 u32 ext_phy_type = 0;
5501 u32 spirom_ver = 0;
5502 u8 status;
5503
5504 if (version == NULL || params == NULL)
5505 return -EINVAL;
5506 bp = params->bp;
5507
5508 spirom_ver = REG_RD(bp, params->shmem_base +
5509 offsetof(struct shmem_region,
5510 port_mb[params->port].ext_phy_fw_version));
5511
5512 status = 0;
5513 /* reset the returned value to zero */
5514 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5515 switch (ext_phy_type) {
5516 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5517
5518 if (len < 5)
5519 return -EINVAL;
5520
5521 version[0] = (spirom_ver & 0xFF);
5522 version[1] = (spirom_ver & 0xFF00) >> 8;
5523 version[2] = (spirom_ver & 0xFF0000) >> 16;
5524 version[3] = (spirom_ver & 0xFF000000) >> 24;
5525 version[4] = '\0';
5526
5527 break;
5528 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5529 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5530 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
5531 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
5532 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5533 status = bnx2x_format_ver(spirom_ver, version, len);
5534 break;
5535 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5536 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5537 spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 |
5538 (spirom_ver & 0x7F);
5539 status = bnx2x_format_ver(spirom_ver, version, len);
5540 break;
5541 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
5542 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
5543 version[0] = '\0';
5544 break;
5545
5546 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
5547 DP(NETIF_MSG_LINK, "bnx2x_get_ext_phy_fw_version:"
5548 " type is FAILURE!\n");
5549 status = -EINVAL;
5550 break;
5551
5552 default:
5553 break;
5554 }
5555 return status;
5556}
5557
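/* Put the internal XGXS in loopback: for 10G, temporarily redirect the
 * NIG MDIO DEVAD and write the loopback setting to the CL73 IEEEB0 AN
 * control register through the AER block; for 1G, set the loopback bit
 * in the COMBO_IEEE0 MII control register.
 */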
5558static void bnx2x_set_xgxs_loopback(struct link_params *params,
5559 struct link_vars *vars,
5560 u8 is_10g)
5561{
5562 u8 port = params->port;
5563 struct bnx2x *bp = params->bp;
5564
5565 if (is_10g) {
5566 u32 md_devad;
5567
5568 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
5569
5570 /* change the uni_phy_addr in the nig */
5571 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
5572 port*0x18));
5573
5574 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
5575
5576 bnx2x_cl45_write(bp, port, 0,
5577 params->phy_addr,
5578 5,
5579 (MDIO_REG_BANK_AER_BLOCK +
5580 (MDIO_AER_BLOCK_AER_REG & 0xf)),
5581 0x2800);
5582
5583 bnx2x_cl45_write(bp, port, 0,
5584 params->phy_addr,
5585 5,
5586 (MDIO_REG_BANK_CL73_IEEEB0 +
5587 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
5588 0x6041);
5589 msleep(200);
5590 /* set aer mmd back */
5591 bnx2x_set_aer_mmd(params, vars);
5592
5593 /* and md_devad */
5594 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
5595 md_devad);
5596
5597 } else {
5598 u16 mii_control;
5599
5600 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
5601
5602 CL45_RD_OVER_CL22(bp, port,
5603 params->phy_addr,
5604 MDIO_REG_BANK_COMBO_IEEE0,
5605 MDIO_COMBO_IEEE0_MII_CONTROL,
5606 &mii_control);
5607
5608 CL45_WR_OVER_CL22(bp, port,
5609 params->phy_addr,
5610 MDIO_REG_BANK_COMBO_IEEE0,
5611 MDIO_COMBO_IEEE0_MII_CONTROL,
5612 (mii_control |
5613 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
5614 }
5615}
5616
5617
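/* Configure loopback on the external PHY where the hardware supports it
 * (PMA/PMD loopback on the 8726, XGXS test register on the SFX7101);
 * other PHY types only log or are left untouched.
 */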
5618static void bnx2x_ext_phy_loopback(struct link_params *params)
5619{
5620 struct bnx2x *bp = params->bp;
5621 u8 ext_phy_addr;
5622 u32 ext_phy_type;
5623
5624 if (params->switch_cfg == SWITCH_CFG_10G) {
5625 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5626 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
5627 /* CL37 Autoneg Enabled */
5628 switch (ext_phy_type) {
5629 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
5630 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN:
5631 DP(NETIF_MSG_LINK,
5632 "ext_phy_loopback: We should not get here\n");
5633 break;
5634 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
5635 DP(NETIF_MSG_LINK, "ext_phy_loopback: 8705\n");
5636 break;
5637 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
5638 DP(NETIF_MSG_LINK, "ext_phy_loopback: 8706\n");
5639 break;
5640 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5641 DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
5642 bnx2x_cl45_write(bp, params->port, ext_phy_type,
5643 ext_phy_addr,
5644 MDIO_PMA_DEVAD,
5645 MDIO_PMA_REG_CTRL,
5646 0x0001);
5647 break;
5648 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5649 /* SFX7101_XGXS_TEST1 */
5650 bnx2x_cl45_write(bp, params->port, ext_phy_type,
5651 ext_phy_addr,
5652 MDIO_XS_DEVAD,
5653 MDIO_XS_SFX7101_XGXS_TEST1,
5654 0x100);
5655 DP(NETIF_MSG_LINK,
5656 "ext_phy_loopback: set ext phy loopback\n");
5657 break;
5658 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5659
5660 break;
5661 } /* switch external PHY type */
5662 } else {
5663 /* serdes */
5664 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
5665 ext_phy_addr = (params->ext_phy_config &
5666 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK)
5667 >> PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT;
5668 }
5669}
5670
5671
5672/*
5673 *------------------------------------------------------------------------
5674 * bnx2x_override_led_value -
5675 *
5676 * Override the LED value of the requested LED
5677 *
5678 *------------------------------------------------------------------------
5679 */
5680u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
5681 u32 led_idx, u32 value)
5682{
5683 u32 reg_val;
5684
5685 /* If port 0 then use EMAC0, else use EMAC1*/
5686 u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5687
5688 DP(NETIF_MSG_LINK,
5689 "bnx2x_override_led_value() port %x led_idx %d value %d\n",
5690 port, led_idx, value);
5691
5692 switch (led_idx) {
5693 case 0: /* 10MB led */
5694 /* Read the current value of the LED register in
5695 the EMAC block */
5696 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5697 /* Set the OVERRIDE bit to 1 */
5698 reg_val |= EMAC_LED_OVERRIDE;
5699 /* If value is 1, set the 10M_OVERRIDE bit,
5700 otherwise reset it.*/
5701 reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
5702 (reg_val & ~EMAC_LED_10MB_OVERRIDE);
5703 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5704 break;
5705 case 1: /*100MB led */
5706 /*Read the current value of the LED register in
5707 the EMAC block */
5708 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5709 /* Set the OVERRIDE bit to 1 */
5710 reg_val |= EMAC_LED_OVERRIDE;
5711 /* If value is 1, set the 100M_OVERRIDE bit,
5712 otherwise reset it.*/
5713 reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
5714 (reg_val & ~EMAC_LED_100MB_OVERRIDE);
5715 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5716 break;
5717 case 2: /* 1000MB led */
5718 /* Read the current value of the LED register in the
5719 EMAC block */
5720 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5721 /* Set the OVERRIDE bit to 1 */
5722 reg_val |= EMAC_LED_OVERRIDE;
5723 /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
5724 reset it. */
5725 reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
5726 (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
5727 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5728 break;
5729 case 3: /* 2500MB led */
5730 /* Read the current value of the LED register in the
5731 EMAC block*/
5732 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5733 /* Set the OVERRIDE bit to 1 */
5734 reg_val |= EMAC_LED_OVERRIDE;
5735 /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
5736 reset it.*/
5737 reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
5738 (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
5739 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5740 break;
5741 case 4: /*10G led */
5742 if (port == 0) {
5743 REG_WR(bp, NIG_REG_LED_10G_P0,
5744 value);
5745 } else {
5746 REG_WR(bp, NIG_REG_LED_10G_P1,
5747 value);
5748 }
5749 break;
5750 case 5: /* TRAFFIC led */
5751 /* Find if the traffic control is via BMAC or EMAC */
5752 if (port == 0)
5753 reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
5754 else
5755 reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
5756
5757 /* Override the traffic led in the EMAC:*/
5758 if (reg_val == 1) {
5759 /* Read the current value of the LED register in
5760 the EMAC block */
5761 reg_val = REG_RD(bp, emac_base +
5762 EMAC_REG_EMAC_LED);
5763 /* Set the TRAFFIC_OVERRIDE bit to 1 */
5764 reg_val |= EMAC_LED_OVERRIDE;
5765 /* If value is 1, set the TRAFFIC bit, otherwise
5766 reset it.*/
5767 reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
5768 (reg_val & ~EMAC_LED_TRAFFIC);
5769 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5770 } else { /* Override the traffic led in the BMAC: */
5771 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
5772 + port*4, 1);
5773 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
5774 value);
5775 }
5776 break;
5777 default:
5778 DP(NETIF_MSG_LINK,
5779 "bnx2x_override_led_value() unknown led index %d "
5780 "(should be 0-5)\n", led_idx);
5781 return -EINVAL;
5782 }
5783
5784 return 0;
5785}
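/*
 * A minimal usage sketch (illustrative, not taken from the original file):
 * a caller that needs to force a specific LED independently of link state
 * would use bnx2x_override_led_value() with led_idx 0-5 as decoded in the
 * switch above. For example, forcing the 10G LED of port 0 on and back off:
 *
 *	if (bnx2x_override_led_value(bp, 0, 4, 1))
 *		DP(NETIF_MSG_LINK, "failed to override the 10G LED\n");
 *	...
 *	bnx2x_override_led_value(bp, 0, 4, 0);
 *
 * For indices 0-3 and 5 the value toggles the EMAC/NIG override bits shown
 * above; for index 4 the value is written directly to the NIG 10G LED
 * register.
 */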
5786
5787
5788u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed)
5789{
5790 u8 port = params->port;
5791 u16 hw_led_mode = params->hw_led_mode;
5792 u8 rc = 0;
5793 u32 tmp;
5794 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5795 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5796 struct bnx2x *bp = params->bp;
5797 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
5798 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
5799 speed, hw_led_mode);
5800 switch (mode) {
5801 case LED_MODE_OFF:
5802 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
5803 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
5804 SHARED_HW_CFG_LED_MAC1);
5805
5806 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
5807 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
5808 break;
5809
5810 case LED_MODE_OPER:
5811 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
5812 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
5813 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
5814 } else {
5815 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
5816 hw_led_mode);
5817 }
5818
5819 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
5820 port*4, 0);
5821 /* Set blinking rate to ~15.9Hz */
5822 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
5823 LED_BLINK_RATE_VAL);
5824 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
5825 port*4, 1);
5826 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
5827 EMAC_WR(bp, EMAC_REG_EMAC_LED,
5828 (tmp & (~EMAC_LED_OVERRIDE)));
5829
5830 if (CHIP_IS_E1(bp) &&
5831 ((speed == SPEED_2500) ||
5832 (speed == SPEED_1000) ||
5833 (speed == SPEED_100) ||
5834 (speed == SPEED_10))) {
5835 /* On Everest 1 Ax chip versions, the LED scheme for
5836 speeds lower than 10G is different */
5837 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
5838 + port*4, 1);
5839 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
5840 port*4, 0);
5841 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
5842 port*4, 1);
5843 }
5844 break;
5845
5846 default:
5847 rc = -EINVAL;
5848 DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
5849 mode);
5850 break;
5851 }
5852 return rc;
5853
5854}
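/*
 * A minimal usage sketch (illustrative): the link LED is normally driven
 * through bnx2x_set_led() when the link changes state, mirroring the calls
 * made from bnx2x_update_link_up() and bnx2x_link_reset() further below:
 *
 *	bnx2x_set_led(params, LED_MODE_OPER, SPEED_10000);	(link came up)
 *	...
 *	bnx2x_set_led(params, LED_MODE_OFF, 0);			(link went down)
 */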
5855
5856u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars)
5857{
5858 struct bnx2x *bp = params->bp;
5859 u16 gp_status = 0;
5860
5861 CL45_RD_OVER_CL22(bp, params->port,
5862 params->phy_addr,
5863 MDIO_REG_BANK_GP_STATUS,
5864 MDIO_GP_STATUS_TOP_AN_STATUS1,
5865 &gp_status);
5866 /* link is up only if both local phy and external phy are up */
5867 if ((gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) &&
5868 bnx2x_ext_phy_is_link_up(params, vars, 1))
5869 return 0;
5870
5871 return -ESRCH;
5872}
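/*
 * A minimal usage sketch (illustrative): a self-test path can poll
 * bnx2x_test_link(), which returns 0 only when both the internal and the
 * external phy report link. This assumes the caller keeps its
 * link_params/link_vars inside struct bnx2x, as the main driver does:
 *
 *	if (bnx2x_test_link(&bp->link_params, &bp->link_vars) != 0)
 *		(treat the link as down in the test result)
 */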
5873
5874static u8 bnx2x_link_initialize(struct link_params *params,
5875 struct link_vars *vars)
5876{
5877 struct bnx2x *bp = params->bp;
5878 u8 port = params->port;
5879 u8 rc = 0;
5880 u8 non_ext_phy;
5881 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5882
5883 /* Activate the external PHY */
5884 bnx2x_ext_phy_reset(params, vars);
5885
5886 bnx2x_set_aer_mmd(params, vars);
5887
5888 if (vars->phy_flags & PHY_XGXS_FLAG)
5889 bnx2x_set_master_ln(params);
5890
5891 rc = bnx2x_reset_unicore(params);
5892 /* reset the SerDes and wait for the reset bit to return low */
5893 if (rc != 0)
5894 return rc;
5895
5896 bnx2x_set_aer_mmd(params, vars);
5897
5898 /* setting the masterLn_def again after the reset */
5899 if (vars->phy_flags & PHY_XGXS_FLAG) {
5900 bnx2x_set_master_ln(params);
5901 bnx2x_set_swap_lanes(params);
5902 }
5903
5904 if (vars->phy_flags & PHY_XGXS_FLAG) {
5905 if ((params->req_line_speed &&
5906 ((params->req_line_speed == SPEED_100) ||
5907 (params->req_line_speed == SPEED_10))) ||
5908 (!params->req_line_speed &&
5909 (params->speed_cap_mask >=
5910 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
5911 (params->speed_cap_mask <
5912 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
5913 )) {
5914 vars->phy_flags |= PHY_SGMII_FLAG;
5915 } else {
5916 vars->phy_flags &= ~PHY_SGMII_FLAG;
5917 }
5918 }
5919 /* In case an external phy exists, the line speed will be the
5920 line speed negotiated by the external phy. In case the connection is
5921 direct only, the line_speed during initialization will be equal to the
5922 req_line_speed */
5923 vars->line_speed = params->req_line_speed;
5924
5925 bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc);
5926
5927 /* init ext phy and enable link state int */
5928 non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
5929 (params->loopback_mode == LOOPBACK_XGXS_10));
5930
5931 if (non_ext_phy ||
5932 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
5933 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
5934 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) ||
5935 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
5936 if (params->req_line_speed == SPEED_AUTO_NEG)
5937 bnx2x_set_parallel_detection(params, vars->phy_flags);
5938 bnx2x_init_internal_phy(params, vars, non_ext_phy);
5939 }
5940
5941 if (!non_ext_phy)
5942 rc |= bnx2x_ext_phy_init(params, vars);
5943
5944 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5945 (NIG_STATUS_XGXS0_LINK10G |
5946 NIG_STATUS_XGXS0_LINK_STATUS |
5947 NIG_STATUS_SERDES0_LINK_STATUS));
5948
5949 return rc;
5950
5951}
5952
5953
5954u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
5955{
5956 struct bnx2x *bp = params->bp;
5957 u32 val;
5958
5959 DP(NETIF_MSG_LINK, "Phy Initialization started\n");
5960 DP(NETIF_MSG_LINK, "req_speed %d, req_flowctrl %d\n",
5961 params->req_line_speed, params->req_flow_ctrl);
5962 vars->link_status = 0;
5963 vars->phy_link_up = 0;
5964 vars->link_up = 0;
5965 vars->line_speed = 0;
5966 vars->duplex = DUPLEX_FULL;
5967 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
5968 vars->mac_type = MAC_TYPE_NONE;
5969
5970 if (params->switch_cfg == SWITCH_CFG_1G)
5971 vars->phy_flags = PHY_SERDES_FLAG;
5972 else
5973 vars->phy_flags = PHY_XGXS_FLAG;
5974
5975 /* disable attentions */
5976 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
5977 (NIG_MASK_XGXS0_LINK_STATUS |
5978 NIG_MASK_XGXS0_LINK10G |
5979 NIG_MASK_SERDES0_LINK_STATUS |
5980 NIG_MASK_MI_INT));
5981
5982 bnx2x_emac_init(params, vars);
5983
5984 if (CHIP_REV_IS_FPGA(bp)) {
5985
5986 vars->link_up = 1;
5987 vars->line_speed = SPEED_10000;
5988 vars->duplex = DUPLEX_FULL;
5989 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
5990 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
5991 /* enable on E1.5 FPGA */
5992 if (CHIP_IS_E1H(bp)) {
5993 vars->flow_ctrl |=
5994 (BNX2X_FLOW_CTRL_TX |
5995 BNX2X_FLOW_CTRL_RX);
5996 vars->link_status |=
5997 (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
5998 LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
5999 }
6000
6001 bnx2x_emac_enable(params, vars, 0);
6002 bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
6003 /* disable drain */
6004 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
6005
6006 /* update shared memory */
6007 bnx2x_update_mng(params, vars->link_status);
6008
6009 return 0;
6010
6011 } else
6012 if (CHIP_REV_IS_EMUL(bp)) {
6013
6014 vars->link_up = 1;
6015 vars->line_speed = SPEED_10000;
6016 vars->duplex = DUPLEX_FULL;
6017 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6018 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
6019
6020 bnx2x_bmac_enable(params, vars, 0);
6021
6022 bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
6023 /* Disable drain */
6024 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
6025 + params->port*4, 0);
6026
6027 /* update shared memory */
6028 bnx2x_update_mng(params, vars->link_status);
6029
6030 return 0;
6031
6032 } else
6033 if (params->loopback_mode == LOOPBACK_BMAC) {
6034
6035 vars->link_up = 1;
6036 vars->line_speed = SPEED_10000;
6037 vars->duplex = DUPLEX_FULL;
6038 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6039 vars->mac_type = MAC_TYPE_BMAC;
6040
6041 vars->phy_flags = PHY_XGXS_FLAG;
6042
6043 bnx2x_phy_deassert(params, vars->phy_flags);
6044 /* set bmac loopback */
6045 bnx2x_bmac_enable(params, vars, 1);
6046
6047 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
6048 params->port*4, 0);
6049
6050 } else if (params->loopback_mode == LOOPBACK_EMAC) {
6051
6052 vars->link_up = 1;
6053 vars->line_speed = SPEED_1000;
6054 vars->duplex = DUPLEX_FULL;
6055 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6056 vars->mac_type = MAC_TYPE_EMAC;
6057
6058 vars->phy_flags = PHY_XGXS_FLAG;
6059
6060 bnx2x_phy_deassert(params, vars->phy_flags);
6061 /* set emac loopback */
6062 bnx2x_emac_enable(params, vars, 1);
6063 bnx2x_emac_program(params, vars->line_speed,
6064 vars->duplex);
6065 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
6066 params->port*4, 0);
6067
6068 } else if ((params->loopback_mode == LOOPBACK_XGXS_10) ||
6069 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
6070
6071 vars->link_up = 1;
6072 vars->line_speed = SPEED_10000;
6073 vars->duplex = DUPLEX_FULL;
6074 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6075
6076 vars->phy_flags = PHY_XGXS_FLAG;
6077
6078 val = REG_RD(bp,
6079 NIG_REG_XGXS0_CTRL_PHY_ADDR+
6080 params->port*0x18);
6081 params->phy_addr = (u8)val;
6082
6083 bnx2x_phy_deassert(params, vars->phy_flags);
6084 bnx2x_link_initialize(params, vars);
6085
6086 vars->mac_type = MAC_TYPE_BMAC;
6087
6088 bnx2x_bmac_enable(params, vars, 0);
6089
6090 if (params->loopback_mode == LOOPBACK_XGXS_10) {
6091 /* set 10G XGXS loopback */
6092 bnx2x_set_xgxs_loopback(params, vars, 1);
6093 } else {
6094 /* set external phy loopback */
6095 bnx2x_ext_phy_loopback(params);
6096 }
6097 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
6098 params->port*4, 0);
6099
6100 bnx2x_set_led(params, LED_MODE_OPER, vars->line_speed);
6101 } else
6102 /* No loopback */
6103 {
6104 bnx2x_phy_deassert(params, vars->phy_flags);
6105 switch (params->switch_cfg) {
6106 case SWITCH_CFG_1G:
6107 vars->phy_flags |= PHY_SERDES_FLAG;
6108 if ((params->ext_phy_config &
6109 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK) ==
6110 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482) {
6111 vars->phy_flags |= PHY_SGMII_FLAG;
6112 }
6113
6114 val = REG_RD(bp,
6115 NIG_REG_SERDES0_CTRL_PHY_ADDR+
6116 params->port*0x10);
6117
6118 params->phy_addr = (u8)val;
6119
6120 break;
6121 case SWITCH_CFG_10G:
6122 vars->phy_flags |= PHY_XGXS_FLAG;
6123 val = REG_RD(bp,
6124 NIG_REG_XGXS0_CTRL_PHY_ADDR+
6125 params->port*0x18);
6126 params->phy_addr = (u8)val;
6127
6128 break;
6129 default:
6130 DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
6131 return -EINVAL;
6132 }
6133 DP(NETIF_MSG_LINK, "Phy address = 0x%x\n", params->phy_addr);
6134
6135 bnx2x_link_initialize(params, vars);
6136 msleep(30);
6137 bnx2x_link_int_enable(params);
6138 }
6139 return 0;
6140}
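/*
 * A minimal usage sketch (illustrative, assumptions noted): a caller brings
 * the link up by filling struct link_params (defined in bnx2x_link.h) from
 * the board configuration in shmem and passing it, together with an output
 * struct link_vars, to bnx2x_phy_init(). Locking of the MDIO/phy resources
 * is left to the caller. Roughly:
 *
 *	params.bp = bp;
 *	params.port = port;
 *	params.req_line_speed = SPEED_AUTO_NEG;
 *	params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
 *	params.switch_cfg = SWITCH_CFG_10G;
 *	(plus shmem_base, ext_phy_config, speed_cap_mask, ...)
 *	rc = bnx2x_phy_init(&params, &vars);
 */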
6141
6142static void bnx2x_8726_reset_phy(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
6143{
6144 DP(NETIF_MSG_LINK, "bnx2x_8726_reset_phy port %d\n", port);
6145
6146 /* Set serial boot control for external load */
6147 bnx2x_cl45_write(bp, port,
6148 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, ext_phy_addr,
6149 MDIO_PMA_DEVAD,
6150 MDIO_PMA_REG_GEN_CTRL, 0x0001);
6151}
6152
6153u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6154 u8 reset_ext_phy)
6155{
6156 struct bnx2x *bp = params->bp;
6157 u32 ext_phy_config = params->ext_phy_config;
6158 u8 port = params->port;
6159 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6160 u32 val = REG_RD(bp, params->shmem_base +
6161 offsetof(struct shmem_region, dev_info.
6162 port_feature_config[params->port].
6163 config));
6164 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
6165 /* disable attentions */
6166 vars->link_status = 0;
6167 bnx2x_update_mng(params, vars->link_status);
6168 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
6169 (NIG_MASK_XGXS0_LINK_STATUS |
6170 NIG_MASK_XGXS0_LINK10G |
6171 NIG_MASK_SERDES0_LINK_STATUS |
6172 NIG_MASK_MI_INT));
6173
6174 /* activate nig drain */
6175 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
6176
6177 /* disable nig egress interface */
6178 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
6179 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
6180
6181 /* Stop BigMac rx */
6182 bnx2x_bmac_rx_disable(bp, port);
6183
6184 /* disable emac */
6185 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6186
6187 msleep(10);
6188 /* The PHY reset is controlled by GPIO 1.
6189 * Hold it low.
6190 */
6191 /* clear link led */
6192 bnx2x_set_led(params, LED_MODE_OFF, 0);
6193 if (reset_ext_phy) {
6194 switch (ext_phy_type) {
6195 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6196 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6197 break;
6198
6199 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6200 {
6201
6202 /* Disable Transmitter */
6203 u8 ext_phy_addr =
6204 XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6205 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
6206 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
6207 bnx2x_sfp_set_transmitter(bp, port,
6208 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
6209 ext_phy_addr, 0);
6210 break;
6211 }
6212 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6213 DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
6214 "low power mode\n",
6215 port);
6216 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6217 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6218 port);
6219 break;
6220 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6221 {
6222 u8 ext_phy_addr =
6223 XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6224 /* Set soft reset */
6225 bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr);
6226 break;
6227 }
6228 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6229 {
6230 u8 ext_phy_addr =
6231 XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6232 bnx2x_cl45_write(bp, port,
6233 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6234 ext_phy_addr,
6235 MDIO_AN_DEVAD,
6236 MDIO_AN_REG_CTRL, 0x0000);
6237 bnx2x_cl45_write(bp, port,
6238 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6239 ext_phy_addr,
6240 MDIO_PMA_DEVAD,
6241 MDIO_PMA_REG_CTRL, 1);
6242 break;
6243 }
6244 default:
6245 /* HW reset */
6246 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6247 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6248 port);
6249 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6250 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6251 port);
6252 DP(NETIF_MSG_LINK, "reset external PHY\n");
6253 }
6254 }
6255 /* reset the SerDes/XGXS */
6256 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
6257 (0x1ff << (port*16)));
6258
6259 /* reset BigMac */
6260 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6261 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
6262
6263 /* disable nig ingress interface */
6264 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
6265 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
6266 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
6267 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
6268 vars->link_up = 0;
6269 return 0;
6270}
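/*
 * A minimal usage sketch (illustrative): on interface or driver unload the
 * link is torn down including the external phy, while a phy firmware
 * upgrade path passes reset_ext_phy == 0 so the external phy is left
 * untouched (see the matching comment in bnx2x_link.h):
 *
 *	bnx2x_link_reset(&params, &vars, 1);	(normal unload)
 *	bnx2x_link_reset(&params, &vars, 0);	(before a phy fw upgrade)
 */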
6271
6272static u8 bnx2x_update_link_down(struct link_params *params,
6273 struct link_vars *vars)
6274{
6275 struct bnx2x *bp = params->bp;
6276 u8 port = params->port;
6277
6278 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
6279 bnx2x_set_led(params, LED_MODE_OFF, 0);
6280
6281 /* indicate no mac active */
6282 vars->mac_type = MAC_TYPE_NONE;
6283
6284 /* update shared memory */
6285 vars->link_status = 0;
6286 vars->line_speed = 0;
6287 bnx2x_update_mng(params, vars->link_status);
6288
6289 /* activate nig drain */
6290 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
6291
6292 /* disable emac */
6293 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6294
6295 msleep(10);
6296
6297 /* reset BigMac */
6298 bnx2x_bmac_rx_disable(bp, params->port);
6299 REG_WR(bp, GRCBASE_MISC +
6300 MISC_REGISTERS_RESET_REG_2_CLEAR,
6301 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
6302 return 0;
6303}
6304
6305static u8 bnx2x_update_link_up(struct link_params *params,
6306 struct link_vars *vars,
6307 u8 link_10g, u32 gp_status)
6308{
6309 struct bnx2x *bp = params->bp;
6310 u8 port = params->port;
6311 u8 rc = 0;
6312
6313 vars->link_status |= LINK_STATUS_LINK_UP;
6314 if (link_10g) {
6315 bnx2x_bmac_enable(params, vars, 0);
6316 bnx2x_set_led(params, LED_MODE_OPER, SPEED_10000);
6317 } else {
6318 rc = bnx2x_emac_program(params, vars->line_speed,
6319 vars->duplex);
6320
6321 bnx2x_emac_enable(params, vars, 0);
6322
6323 /* AN complete? */
6324 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
6325 if (!(vars->phy_flags &
6326 PHY_SGMII_FLAG))
6327 bnx2x_set_gmii_tx_driver(params);
6328 }
6329 }
6330
6331 /* PBF - link up */
6332 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
6333 vars->line_speed);
6334
6335 /* disable drain */
6336 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
6337
6338 /* update shared memory */
6339 bnx2x_update_mng(params, vars->link_status);
6340 msleep(20);
6341 return rc;
6342}
6343/* This function should be called upon a link interrupt */
6344/* In case vars->link_up is set, the driver needs to:
6345 1. Update the pbf
6346 2. Disable drain
6347 3. Update the shared memory
6348 4. Indicate link up
6349 5. Set LEDs
6350 Otherwise:
6351 1. Update shared memory
6352 2. Reset BigMac
6353 3. Report link down
6354 4. Unset LEDs
6355*/
6356u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6357{
6358 struct bnx2x *bp = params->bp;
6359 u8 port = params->port;
6360 u16 gp_status;
6361 u8 link_10g;
6362 u8 ext_phy_link_up, rc = 0;
6363 u32 ext_phy_type;
6364 u8 is_mi_int = 0;
6365
6366 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
6367 port, (vars->phy_flags & PHY_XGXS_FLAG),
6368 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
6369
6370 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
6371 port*0x18) > 0);
6372 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
6373 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
6374 is_mi_int,
6375 REG_RD(bp,
6376 NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
6377
6378 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
6379 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
6380 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
6381
6382 /* disable emac */
6383 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6384
6385 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
6386
6387 /* Check external link change only for non-direct */
6388 ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars, is_mi_int);
6389
6390 /* Read gp_status */
6391 CL45_RD_OVER_CL22(bp, port, params->phy_addr,
6392 MDIO_REG_BANK_GP_STATUS,
6393 MDIO_GP_STATUS_TOP_AN_STATUS1,
6394 &gp_status);
6395
6396 rc = bnx2x_link_settings_status(params, vars, gp_status,
6397 ext_phy_link_up);
6398 if (rc != 0)
6399 return rc;
6400
6401 /* any speed of 10G and over uses the bmac */
6402 link_10g = ((vars->line_speed == SPEED_10000) ||
6403 (vars->line_speed == SPEED_12000) ||
6404 (vars->line_speed == SPEED_12500) ||
6405 (vars->line_speed == SPEED_13000) ||
6406 (vars->line_speed == SPEED_15000) ||
6407 (vars->line_speed == SPEED_16000));
6408
6409 bnx2x_link_int_ack(params, vars, link_10g, is_mi_int);
6410
6411 /* In case the external phy link is up and the internal link is down
6412 (probably not initialized yet, e.g. right after link initialization),
6413 the internal phy needs to be initialized.
6414 Note that after a link down-up caused by a cable plug,
6415 the xgxs link would probably come up again without the need to
6416 initialize it */
6417
6418 if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
6419 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
6420 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) &&
6421 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) &&
6422 (ext_phy_link_up && !vars->phy_link_up))
6423 bnx2x_init_internal_phy(params, vars, 0);
6424
6425 /* link is up only if both local phy and external phy are up */
6426 vars->link_up = (ext_phy_link_up && vars->phy_link_up);
6427
6428 if (vars->link_up)
6429 rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
6430 else
6431 rc = bnx2x_update_link_down(params, vars);
6432
6433 return rc;
6434}
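/*
 * A minimal usage sketch (illustrative, assumptions noted): the link
 * attention handler in the main driver is expected to call this, typically
 * under the driver's phy lock, and then act on the result. This assumes
 * the link_params/link_vars live inside struct bnx2x:
 *
 *	bnx2x_link_update(&bp->link_params, &bp->link_vars);
 *	if (bp->link_vars.link_up)
 *		(report link up, restart the tx queues)
 *	else
 *		(report link down)
 */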
6435
6436static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6437{
6438 u8 ext_phy_addr[PORT_MAX];
6439 u16 val;
6440 s8 port;
6441
6442 /* PART1 - Reset both phys */
6443 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6444 /* Extract the ext phy address for the port */
6445 u32 ext_phy_config = REG_RD(bp, shmem_base +
6446 offsetof(struct shmem_region,
6447 dev_info.port_hw_config[port].external_phy_config));
6448
6449 /* disable attentions */
6450 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
6451 (NIG_MASK_XGXS0_LINK_STATUS |
6452 NIG_MASK_XGXS0_LINK10G |
6453 NIG_MASK_SERDES0_LINK_STATUS |
6454 NIG_MASK_MI_INT));
6455
6456 ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
6457
6458 /* Need to take the phy out of low power mode in order
6459 to be able to access its registers */
6460 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6461 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
6462
6463 /* Reset the phy */
6464 bnx2x_cl45_write(bp, port,
6465 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6466 ext_phy_addr[port],
6467 MDIO_PMA_DEVAD,
6468 MDIO_PMA_REG_CTRL,
6469 1<<15);
6470 }
6471
6472 /* Add delay of 150ms after reset */
6473 msleep(150);
6474
6475 /* PART2 - Download firmware to both phys */
6476 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6477 u16 fw_ver1;
6478
6479 bnx2x_bcm8073_external_rom_boot(bp, port,
6480 ext_phy_addr[port], shmem_base);
6481
6482 bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6483 ext_phy_addr[port],
6484 MDIO_PMA_DEVAD,
6485 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
6486 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
6487 DP(NETIF_MSG_LINK,
6488 "bnx2x_8073_common_init_phy port %x:"
6489 "Download failed. fw version = 0x%x\n",
6490 port, fw_ver1);
6491 return -EINVAL;
6492 }
6493
6494 /* Only set bit 10 = 1 (Tx power down) */
6495 bnx2x_cl45_read(bp, port,
6496 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6497 ext_phy_addr[port],
6498 MDIO_PMA_DEVAD,
6499 MDIO_PMA_REG_TX_POWER_DOWN, &val);
6500
6501 /* Phase1 of TX_POWER_DOWN reset */
6502 bnx2x_cl45_write(bp, port,
6503 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6504 ext_phy_addr[port],
6505 MDIO_PMA_DEVAD,
6506 MDIO_PMA_REG_TX_POWER_DOWN,
6507 (val | 1<<10));
6508 }
6509
6510 /* Toggle the transmitter: power down and then up with a 600ms
6511 delay in between */
6512 msleep(600);
6513
6514 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
6515 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6516 /* Phase2 of POWER_DOWN_RESET */
6517 /* Release bit 10 (Release Tx power down) */
6518 bnx2x_cl45_read(bp, port,
6519 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6520 ext_phy_addr[port],
6521 MDIO_PMA_DEVAD,
6522 MDIO_PMA_REG_TX_POWER_DOWN, &val);
6523
6524 bnx2x_cl45_write(bp, port,
6525 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6526 ext_phy_addr[port],
6527 MDIO_PMA_DEVAD,
6528 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
6529 msleep(15);
6530
6531 /* Read-modify-write the SPI-ROM version select register */
6532 bnx2x_cl45_read(bp, port,
6533 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6534 ext_phy_addr[port],
6535 MDIO_PMA_DEVAD,
6536 MDIO_PMA_REG_EDC_FFE_MAIN, &val);
6537 bnx2x_cl45_write(bp, port,
6538 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6539 ext_phy_addr[port],
6540 MDIO_PMA_DEVAD,
6541 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
6542
6543 /* set GPIO2 back to LOW */
6544 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6545 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6546 }
6547 return 0;
6548
6549}
6550
6551static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6552{
6553 u8 ext_phy_addr[PORT_MAX];
6554 s8 port, first_port, i;
6555 u32 swap_val, swap_override;
6556 DP(NETIF_MSG_LINK, "Executing BCM8727 common init\n");
6557 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6558 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6559
6560 bnx2x_ext_phy_hw_reset(bp, 1 ^ (swap_val && swap_override));
6561 msleep(5);
6562
6563 if (swap_val && swap_override)
6564 first_port = PORT_0;
6565 else
6566 first_port = PORT_1;
6567
6568 /* PART1 - Reset both phys */
6569 for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) {
6570 /* Extract the ext phy address for the port */
6571 u32 ext_phy_config = REG_RD(bp, shmem_base +
6572 offsetof(struct shmem_region,
6573 dev_info.port_hw_config[port].external_phy_config));
6574
6575 /* disable attentions */
6576 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
6577 (NIG_MASK_XGXS0_LINK_STATUS |
6578 NIG_MASK_XGXS0_LINK10G |
6579 NIG_MASK_SERDES0_LINK_STATUS |
6580 NIG_MASK_MI_INT));
6581
6582 ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
6583
6584 /* Reset the phy */
6585 bnx2x_cl45_write(bp, port,
6586 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
6587 ext_phy_addr[port],
6588 MDIO_PMA_DEVAD,
6589 MDIO_PMA_REG_CTRL,
6590 1<<15);
6591 }
6592
6593 /* Add delay of 150ms after reset */
6594 msleep(150);
6595
6596 /* PART2 - Download firmware to both phys */
6597 for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) {
6598 u16 fw_ver1;
6599
6600 bnx2x_bcm8727_external_rom_boot(bp, port,
6601 ext_phy_addr[port], shmem_base);
6602
6603 bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
6604 ext_phy_addr[port],
6605 MDIO_PMA_DEVAD,
6606 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
6607 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
6608 DP(NETIF_MSG_LINK,
6609 "bnx2x_8727_common_init_phy port %x:"
6610 "Download failed. fw version = 0x%x\n",
6611 port, fw_ver1);
6612 return -EINVAL;
6613 }
6614 }
6615
6616 return 0;
6617}
6618
6619
6620static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6621{
6622 u8 ext_phy_addr;
6623 u32 val;
6624 s8 port;
6625
6626 /* Use port1 because of the static port-swap */
6627 /* Enable the module detection interrupt */
6628 val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
6629 val |= ((1<<MISC_REGISTERS_GPIO_3)|
6630 (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
6631 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
6632
6633 bnx2x_ext_phy_hw_reset(bp, 1);
6634 msleep(5);
6635 for (port = 0; port < PORT_MAX; port++) {
6636 /* Extract the ext phy address for the port */
6637 u32 ext_phy_config = REG_RD(bp, shmem_base +
6638 offsetof(struct shmem_region,
6639 dev_info.port_hw_config[port].external_phy_config));
6640
6641 ext_phy_addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
6642 DP(NETIF_MSG_LINK, "8726_common_init : ext_phy_addr = 0x%x\n",
6643 ext_phy_addr);
6644
6645 bnx2x_8726_reset_phy(bp, port, ext_phy_addr);
6646
6647 /* Set fault module detected LED on */
6648 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
6649 MISC_REGISTERS_GPIO_HIGH,
6650 port);
6651 }
6652
6653 return 0;
6654}
6655
6656
6657static u8 bnx2x_84823_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6658{
6659 /* HW reset */
6660 bnx2x_ext_phy_hw_reset(bp, 1);
6661 return 0;
6662}
6663u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6664{
6665 u8 rc = 0;
6666 u32 ext_phy_type;
6667
6668 DP(NETIF_MSG_LINK, "Begin common phy init\n");
6669
6670 /* Read the ext_phy_type for arbitrary port(0) */
6671 ext_phy_type = XGXS_EXT_PHY_TYPE(
6672 REG_RD(bp, shmem_base +
6673 offsetof(struct shmem_region,
6674 dev_info.port_hw_config[0].external_phy_config)));
6675
6676 switch (ext_phy_type) {
6677 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6678 {
6679 rc = bnx2x_8073_common_init_phy(bp, shmem_base);
6680 break;
6681 }
6682
6683 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6684 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
6685 rc = bnx2x_8727_common_init_phy(bp, shmem_base);
6686 break;
6687
6688 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6689 /* GPIO1 affects both ports, so it cannot be pulled
6690 for a single port alone */
6691 rc = bnx2x_8726_common_init_phy(bp, shmem_base);
6692 break;
6693 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6694 rc = bnx2x_84823_common_init_phy(bp, shmem_base);
6695 break;
6696 default:
6697 DP(NETIF_MSG_LINK,
6698 "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
6699 ext_phy_type);
6700 break;
6701 }
6702
6703 return rc;
6704}
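/*
 * A minimal usage sketch (illustrative): this one-time external phy
 * bring-up is meant to run once per chip from the common initialization
 * path (it is keyed off port 0's configuration), e.g.:
 *
 *	if (bnx2x_common_init_phy(bp, bp->common.shmem_base) != 0)
 *		BNX2X_ERR("common phy init failed\n");
 *
 * bp->common.shmem_base is used here as an example caller field; any valid
 * shmem base address read from the hardware works.
 */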
6705
6706void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
6707{
6708 u16 val, cnt;
6709
6710 bnx2x_cl45_read(bp, port,
6711 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
6712 phy_addr,
6713 MDIO_PMA_DEVAD,
6714 MDIO_PMA_REG_7101_RESET, &val);
6715
6716 for (cnt = 0; cnt < 10; cnt++) {
6717 msleep(50);
6718 /* Writes a self-clearing reset */
6719 bnx2x_cl45_write(bp, port,
6720 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
6721 phy_addr,
6722 MDIO_PMA_DEVAD,
6723 MDIO_PMA_REG_7101_RESET,
6724 (val | (1<<15)));
6725 /* Wait for clear */
6726 bnx2x_cl45_read(bp, port,
6727 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
6728 phy_addr,
6729 MDIO_PMA_DEVAD,
6730 MDIO_PMA_REG_7101_RESET, &val);
6731
6732 if ((val & (1<<15)) == 0)
6733 break;
6734 }
6735}
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
new file mode 100644
index 000000000000..40c2981de8ed
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -0,0 +1,206 @@
1/* Copyright 2008-2009 Broadcom Corporation
2 *
3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you
5 * under the terms of the GNU General Public License version 2, available
6 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
7 *
8 * Notwithstanding the above, under no circumstances may you combine this
9 * software in any way with any other Broadcom software provided under a
10 * license other than the GPL, without Broadcom's express prior written
11 * consent.
12 *
13 * Written by Yaniv Rosner
14 *
15 */
16
17#ifndef BNX2X_LINK_H
18#define BNX2X_LINK_H
19
20
21
22/***********************************************************/
23/* Defines */
24/***********************************************************/
25#define DEFAULT_PHY_DEV_ADDR 3
26
27
28
29#define BNX2X_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO
30#define BNX2X_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX
31#define BNX2X_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX
32#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
33#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
34
35#define SPEED_AUTO_NEG 0
36#define SPEED_12000 12000
37#define SPEED_12500 12500
38#define SPEED_13000 13000
39#define SPEED_15000 15000
40#define SPEED_16000 16000
41
42#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
43#define SFP_EEPROM_VENDOR_NAME_SIZE 16
44#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
45#define SFP_EEPROM_VENDOR_OUI_SIZE 3
46#define SFP_EEPROM_PART_NO_ADDR 0x28
47#define SFP_EEPROM_PART_NO_SIZE 16
48#define PWR_FLT_ERR_MSG_LEN 250
49/***********************************************************/
50/* Structs */
51/***********************************************************/
52/* Inputs parameters to the CLC */
53struct link_params {
54
55 u8 port;
56
57 /* Default / User Configuration */
58 u8 loopback_mode;
59#define LOOPBACK_NONE 0
60#define LOOPBACK_EMAC 1
61#define LOOPBACK_BMAC 2
62#define LOOPBACK_XGXS_10 3
63#define LOOPBACK_EXT_PHY 4
64#define LOOPBACK_EXT 5
65
66 u16 req_duplex;
67 u16 req_flow_ctrl;
68 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
69 req_flow_ctrl is set to AUTO */
70 u16 req_line_speed; /* Also determine AutoNeg */
71
72 /* Device parameters */
73 u8 mac_addr[6];
74
75 /* shmem parameters */
76 u32 shmem_base;
77 u32 speed_cap_mask;
78 u32 switch_cfg;
79#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
80#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
81#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
82
83 u16 hw_led_mode; /* part of the hw_config read from the shmem */
84
85 /* phy_addr populated by the phy_init function */
86 u8 phy_addr;
87 /*u8 reserved1;*/
88
89 u32 lane_config;
90 u32 ext_phy_config;
91#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
92 ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
93#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
94 (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
95 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
96#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
97 ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
98
99 /* Phy register parameter */
100 u32 chip_id;
101
102 u16 xgxs_config_rx[4]; /* preemphasis values for the rx side */
103 u16 xgxs_config_tx[4]; /* preemphasis values for the tx side */
104
105 u32 feature_config_flags;
106#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
107#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
108#define FEATURE_CONFIG_BCM8727_NOC (1<<3)
109
110 /* Device pointer passed to all callback functions */
111 struct bnx2x *bp;
112};
113
114/* Output parameters */
115struct link_vars {
116 u8 phy_flags;
117
118 u8 mac_type;
119#define MAC_TYPE_NONE 0
120#define MAC_TYPE_EMAC 1
121#define MAC_TYPE_BMAC 2
122
123 u8 phy_link_up; /* internal phy link indication */
124 u8 link_up;
125
126 u16 line_speed;
127 u16 duplex;
128
129 u16 flow_ctrl;
130 u16 ieee_fc;
131
132 u32 autoneg;
133#define AUTO_NEG_DISABLED 0x0
134#define AUTO_NEG_ENABLED 0x1
135#define AUTO_NEG_COMPLETE 0x2
136#define AUTO_NEG_PARALLEL_DETECTION_USED 0x3
137
138 /* The same definitions as the shmem parameter */
139 u32 link_status;
140};
141
142/***********************************************************/
143/* Functions */
144/***********************************************************/
145
146/* Initialize the phy */
147u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output);
148
149/* Reset the link. Should be called when the driver or interface goes down.
150 Before calling a phy firmware upgrade, reset_ext_phy should be set
151 to 0 */
152u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
153 u8 reset_ext_phy);
154
155/* bnx2x_link_update should be called upon link interrupt */
156u8 bnx2x_link_update(struct link_params *input, struct link_vars *output);
157
158/* Use the following cl45 functions to read/write from the external phy.
159 In order to use them to read/write internal phy registers, use
160 DEFAULT_PHY_DEV_ADDR as devad and (_bank + (_addr & 0xf)) as
161 the register.
162 Use ext_phy_type of 0 in case of cl22 over cl45 */
163u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
164 u8 phy_addr, u8 devad, u16 reg, u16 *ret_val);
165
166u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
167 u8 phy_addr, u8 devad, u16 reg, u16 val);
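/* Illustrative example (an editor sketch, not part of the original header),
   assuming a BCM8073 external phy at address ext_phy_addr on the given port:
   read the PMA control register and set its reset bit (bit 15), the same
   sequence the 8073 common init in bnx2x_link.c uses:

	u16 val;
	bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
			ext_phy_addr, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &val);
	bnx2x_cl45_write(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
			 ext_phy_addr, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL,
			 val | (1<<15));
*/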
168
169/* Reads the link_status from the shmem
170 and updates the link vars accordingly */
171void bnx2x_link_status_update(struct link_params *input,
172 struct link_vars *output);
173/* returns string representing the fw_version of the external phy */
174u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
175 u8 *version, u16 len);
176
177/* Set/Unset the led
178 Basically, the CLC takes care of the led for the link, but in case one needs
179 to set/unset the led manually, set the "mode" to LED_MODE_OPER to
180 blink the led, and LED_MODE_OFF to set the led off. */
181u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed);
182#define LED_MODE_OFF 0
183#define LED_MODE_OPER 2
184
185u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
186
187/* bnx2x_handle_module_detect_int should be called upon module detection
188 interrupt */
189void bnx2x_handle_module_detect_int(struct link_params *params);
190
191/* Get the actual link status. If it returns 0, the link is up;
192 otherwise the link is down */
193u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);
194
195/* One-time initialization for external phy after power up */
196u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);
197
198/* Reset the external PHY using GPIO */
199void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
200
201void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr);
202
203u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr,
204 u8 byte_cnt, u8 *o_buf);
205
206#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
new file mode 100644
index 000000000000..51b788339c90
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -0,0 +1,13933 @@
1/* bnx2x_main.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
41#include <linux/if_vlan.h>
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
45#include <net/ip6_checksum.h>
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
48#include <linux/crc32c.h>
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
51#include <linux/io.h>
52#include <linux/stringify.h>
53
54
55#include "bnx2x.h"
56#include "bnx2x_init.h"
57#include "bnx2x_init_ops.h"
58#include "bnx2x_dump.h"
59
60#define DRV_MODULE_VERSION "1.52.53-1"
61#define DRV_MODULE_RELDATE "2010/18/04"
62#define BNX2X_BC_VER 0x040200
63
64#include <linux/firmware.h>
65#include "bnx2x_fw_file_hdr.h"
66/* FW files */
67#define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
74
75/* Time in jiffies before concluding the transmitter is hung */
76#define TX_TIMEOUT (5*HZ)
77
78static char version[] __devinitdata =
79 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
80 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
81
82MODULE_AUTHOR("Eliezer Tamir");
83MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
84MODULE_LICENSE("GPL");
85MODULE_VERSION(DRV_MODULE_VERSION);
86MODULE_FIRMWARE(FW_FILE_NAME_E1);
87MODULE_FIRMWARE(FW_FILE_NAME_E1H);
88
89static int multi_mode = 1;
90module_param(multi_mode, int, 0);
91MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92 "(0 Disable; 1 Enable (default))");
93
94static int num_queues;
95module_param(num_queues, int, 0);
96MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97 " (default is the number of CPUs)");
98
99static int disable_tpa;
100module_param(disable_tpa, int, 0);
101MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
102
103static int int_mode;
104module_param(int_mode, int, 0);
105MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
106 "(1 INT#x; 2 MSI)");
107
108static int dropless_fc;
109module_param(dropless_fc, int, 0);
110MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
111
112static int poll;
113module_param(poll, int, 0);
114MODULE_PARM_DESC(poll, " Use polling (for debug)");
115
116static int mrrs = -1;
117module_param(mrrs, int, 0);
118MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
119
120static int debug;
121module_param(debug, int, 0);
122MODULE_PARM_DESC(debug, " Default debug msglevel");
123
124static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
125
126static struct workqueue_struct *bnx2x_wq;
127
128enum bnx2x_board_type {
129 BCM57710 = 0,
130 BCM57711 = 1,
131 BCM57711E = 2,
132};
133
134/* indexed by board_type, above */
135static struct {
136 char *name;
137} board_info[] __devinitdata = {
138 { "Broadcom NetXtreme II BCM57710 XGb" },
139 { "Broadcom NetXtreme II BCM57711 XGb" },
140 { "Broadcom NetXtreme II BCM57711E XGb" }
141};
142
143
144static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
147 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
148 { 0 }
149};
150
151MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
152
153/****************************************************************************
154* General service functions
155****************************************************************************/
156
157/* used only at init
158 * locking is done by mcp
159 */
160void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
161{
162 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
163 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
164 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
165 PCICFG_VENDOR_ID_OFFSET);
166}
167
168static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
169{
170 u32 val;
171
172 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
173 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
174 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
175 PCICFG_VENDOR_ID_OFFSET);
176
177 return val;
178}
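/*
 * An illustrative note (editor sketch): these two helpers implement
 * indirect GRC access through PCI config space and are the fallback used
 * while DMAE is not yet ready (see bnx2x_read_dmae() below). A paired
 * write and read-back of an arbitrary GRC address would look like:
 *
 *	bnx2x_reg_wr_ind(bp, addr, val);
 *	if (bnx2x_reg_rd_ind(bp, addr) != val)
 *		BNX2X_ERR("indirect write to 0x%x did not stick\n", addr);
 *
 * The trailing write of PCICFG_VENDOR_ID_OFFSET in both helpers appears to
 * park the GRC address window on a harmless offset after each access.
 */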
179
180static const u32 dmae_reg_go_c[] = {
181 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
182 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
183 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
184 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
185};
186
187/* copy command into DMAE command memory and set DMAE command go */
188static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
189 int idx)
190{
191 u32 cmd_offset;
192 int i;
193
194 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
195 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
196 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
197
198 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
199 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
200 }
201 REG_WR(bp, dmae_reg_go_c[idx], 1);
202}
203
204void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
205 u32 len32)
206{
207 struct dmae_command dmae;
208 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
209 int cnt = 200;
210
211 if (!bp->dmae_ready) {
212 u32 *data = bnx2x_sp(bp, wb_data[0]);
213
214 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
215 " using indirect\n", dst_addr, len32);
216 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
217 return;
218 }
219
220 memset(&dmae, 0, sizeof(struct dmae_command));
221
222 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
223 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
224 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
225#ifdef __BIG_ENDIAN
226 DMAE_CMD_ENDIANITY_B_DW_SWAP |
227#else
228 DMAE_CMD_ENDIANITY_DW_SWAP |
229#endif
230 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
231 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
232 dmae.src_addr_lo = U64_LO(dma_addr);
233 dmae.src_addr_hi = U64_HI(dma_addr);
234 dmae.dst_addr_lo = dst_addr >> 2;
235 dmae.dst_addr_hi = 0;
236 dmae.len = len32;
237 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
238 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
239 dmae.comp_val = DMAE_COMP_VAL;
240
241 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
242 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
243 "dst_addr [%x:%08x (%08x)]\n"
244 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
245 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
246 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
247 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
248 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
249 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
250 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
251
252 mutex_lock(&bp->dmae_mutex);
253
254 *wb_comp = 0;
255
256 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
257
258 udelay(5);
259
260 while (*wb_comp != DMAE_COMP_VAL) {
261 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
262
263 if (!cnt) {
264 BNX2X_ERR("DMAE timeout!\n");
265 break;
266 }
267 cnt--;
268 /* adjust delay for emulation/FPGA */
269 if (CHIP_REV_IS_SLOW(bp))
270 msleep(100);
271 else
272 udelay(5);
273 }
274
275 mutex_unlock(&bp->dmae_mutex);
276}
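/*
 * A minimal usage sketch (illustrative): callers hand bnx2x_write_dmae() a
 * DMA-able host buffer and a GRC destination, with the length given in
 * 32-bit words. Copying the four slowpath wb_data words to some device
 * address dst would look like:
 *
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), dst, 4);
 *
 * bnx2x_write_dmae_phys_len() further below wraps this call to split
 * transfers that exceed the per-command DMAE length limit.
 */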
277
278void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
279{
280 struct dmae_command dmae;
281 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
282 int cnt = 200;
283
284 if (!bp->dmae_ready) {
285 u32 *data = bnx2x_sp(bp, wb_data[0]);
286 int i;
287
288 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
289 " using indirect\n", src_addr, len32);
290 for (i = 0; i < len32; i++)
291 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
292 return;
293 }
294
295 memset(&dmae, 0, sizeof(struct dmae_command));
296
297 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
298 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
299 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
300#ifdef __BIG_ENDIAN
301 DMAE_CMD_ENDIANITY_B_DW_SWAP |
302#else
303 DMAE_CMD_ENDIANITY_DW_SWAP |
304#endif
305 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
306 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
307 dmae.src_addr_lo = src_addr >> 2;
308 dmae.src_addr_hi = 0;
309 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
310 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
311 dmae.len = len32;
312 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
313 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
314 dmae.comp_val = DMAE_COMP_VAL;
315
316 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
317 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
318 "dst_addr [%x:%08x (%08x)]\n"
319 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
320 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
321 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
322 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
323
324 mutex_lock(&bp->dmae_mutex);
325
326 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
327 *wb_comp = 0;
328
329 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
330
331 udelay(5);
332
333 while (*wb_comp != DMAE_COMP_VAL) {
334
335 if (!cnt) {
336 BNX2X_ERR("DMAE timeout!\n");
337 break;
338 }
339 cnt--;
340 /* adjust delay for emulation/FPGA */
341 if (CHIP_REV_IS_SLOW(bp))
342 msleep(100);
343 else
344 udelay(5);
345 }
346 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
347 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
348 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
349
350 mutex_unlock(&bp->dmae_mutex);
351}
352
353void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
354 u32 addr, u32 len)
355{
356 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
357 int offset = 0;
358
359 while (len > dmae_wr_max) {
360 bnx2x_write_dmae(bp, phys_addr + offset,
361 addr + offset, dmae_wr_max);
362 offset += dmae_wr_max * 4;
363 len -= dmae_wr_max;
364 }
365
366 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
367}
368
369/* used only for slowpath so not inlined */
370static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
371{
372 u32 wb_write[2];
373
374 wb_write[0] = val_hi;
375 wb_write[1] = val_lo;
376 REG_WR_DMAE(bp, reg, wb_write, 2);
377}
378
379#ifdef USE_WB_RD
380static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
381{
382 u32 wb_data[2];
383
384 REG_RD_DMAE(bp, reg, wb_data, 2);
385
386 return HILO_U64(wb_data[0], wb_data[1]);
387}
388#endif
389
390static int bnx2x_mc_assert(struct bnx2x *bp)
391{
392 char last_idx;
393 int i, rc = 0;
394 u32 row0, row1, row2, row3;
395
396 /* XSTORM */
397 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
398 XSTORM_ASSERT_LIST_INDEX_OFFSET);
399 if (last_idx)
400 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
401
402 /* print the asserts */
403 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
404
405 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i));
407 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
409 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
411 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
412 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
413
414 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
415 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
416 " 0x%08x 0x%08x 0x%08x\n",
417 i, row3, row2, row1, row0);
418 rc++;
419 } else {
420 break;
421 }
422 }
423
424 /* TSTORM */
425 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
426 TSTORM_ASSERT_LIST_INDEX_OFFSET);
427 if (last_idx)
428 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
429
430 /* print the asserts */
431 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
432
433 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i));
435 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
437 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
439 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
440 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
441
442 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
443 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
444 " 0x%08x 0x%08x 0x%08x\n",
445 i, row3, row2, row1, row0);
446 rc++;
447 } else {
448 break;
449 }
450 }
451
452 /* CSTORM */
453 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
454 CSTORM_ASSERT_LIST_INDEX_OFFSET);
455 if (last_idx)
456 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
457
458 /* print the asserts */
459 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
460
461 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i));
463 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
465 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
467 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
468 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
469
470 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
471 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
472 " 0x%08x 0x%08x 0x%08x\n",
473 i, row3, row2, row1, row0);
474 rc++;
475 } else {
476 break;
477 }
478 }
479
480 /* USTORM */
481 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
482 USTORM_ASSERT_LIST_INDEX_OFFSET);
483 if (last_idx)
484 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
485
486 /* print the asserts */
487 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
488
489 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i));
491 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
492 USTORM_ASSERT_LIST_OFFSET(i) + 4);
493 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
494 USTORM_ASSERT_LIST_OFFSET(i) + 8);
495 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
496 USTORM_ASSERT_LIST_OFFSET(i) + 12);
497
498 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
499 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
500 " 0x%08x 0x%08x 0x%08x\n",
501 i, row3, row2, row1, row0);
502 rc++;
503 } else {
504 break;
505 }
506 }
507
508 return rc;
509}
510
511static void bnx2x_fw_dump(struct bnx2x *bp)
512{
513 u32 addr;
514 u32 mark, offset;
515 __be32 data[9];
516 int word;
517
518 if (BP_NOMCP(bp)) {
519 BNX2X_ERR("NO MCP - can not dump\n");
520 return;
521 }
522
523 addr = bp->common.shmem_base - 0x0800 + 4;
524 mark = REG_RD(bp, addr);
525 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
526 pr_err("begin fw dump (mark 0x%x)\n", mark);
527
528 pr_err("");
529 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
530 for (word = 0; word < 8; word++)
531 data[word] = htonl(REG_RD(bp, offset + 4*word));
532 data[8] = 0x0;
533 pr_cont("%s", (char *)data);
534 }
535 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
536 for (word = 0; word < 8; word++)
537 data[word] = htonl(REG_RD(bp, offset + 4*word));
538 data[8] = 0x0;
539 pr_cont("%s", (char *)data);
540 }
541 pr_err("end of fw dump\n");
542}
543
544static void bnx2x_panic_dump(struct bnx2x *bp)
545{
546 int i;
547 u16 j, start, end;
548
549 bp->stats_state = STATS_STATE_DISABLED;
550 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
551
552 BNX2X_ERR("begin crash dump -----------------\n");
553
554 /* Indices */
555 /* Common */
556 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
557 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
558 " spq_prod_idx(0x%x)\n",
559 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
560 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
561
562 /* Rx */
563 for_each_queue(bp, i) {
564 struct bnx2x_fastpath *fp = &bp->fp[i];
565
566 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
567 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
568 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
569 i, fp->rx_bd_prod, fp->rx_bd_cons,
570 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
571 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
572 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
573 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
574 fp->rx_sge_prod, fp->last_max_sge,
575 le16_to_cpu(fp->fp_u_idx),
576 fp->status_blk->u_status_block.status_block_index);
577 }
578
579 /* Tx */
580 for_each_queue(bp, i) {
581 struct bnx2x_fastpath *fp = &bp->fp[i];
582
583 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
584 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
585 " *tx_cons_sb(0x%x)\n",
586 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
587 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
588 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
589 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
590 fp->status_blk->c_status_block.status_block_index,
591 fp->tx_db.data.prod);
592 }
593
594 /* Rings */
595 /* Rx */
596 for_each_queue(bp, i) {
597 struct bnx2x_fastpath *fp = &bp->fp[i];
598
599 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
600 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
601 for (j = start; j != end; j = RX_BD(j + 1)) {
602 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
603 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
604
605 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
606 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
607 }
608
609 start = RX_SGE(fp->rx_sge_prod);
610 end = RX_SGE(fp->last_max_sge);
611 for (j = start; j != end; j = RX_SGE(j + 1)) {
612 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
613 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
614
615 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
616 i, j, rx_sge[1], rx_sge[0], sw_page->page);
617 }
618
619 start = RCQ_BD(fp->rx_comp_cons - 10);
620 end = RCQ_BD(fp->rx_comp_cons + 503);
621 for (j = start; j != end; j = RCQ_BD(j + 1)) {
622 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
623
624 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
625 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
626 }
627 }
628
629 /* Tx */
630 for_each_queue(bp, i) {
631 struct bnx2x_fastpath *fp = &bp->fp[i];
632
633 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
634 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
635 for (j = start; j != end; j = TX_BD(j + 1)) {
636 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
637
638 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
639 i, j, sw_bd->skb, sw_bd->first_bd);
640 }
641
642 start = TX_BD(fp->tx_bd_cons - 10);
643 end = TX_BD(fp->tx_bd_cons + 254);
644 for (j = start; j != end; j = TX_BD(j + 1)) {
645 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
646
647 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
648 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
649 }
650 }
651
652 bnx2x_fw_dump(bp);
653 bnx2x_mc_assert(bp);
654 BNX2X_ERR("end crash dump -----------------\n");
655}
656
657static void bnx2x_int_enable(struct bnx2x *bp)
658{
659 int port = BP_PORT(bp);
660 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
661 u32 val = REG_RD(bp, addr);
662 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
663 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
664
665 if (msix) {
666 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667 HC_CONFIG_0_REG_INT_LINE_EN_0);
668 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
669 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
670 } else if (msi) {
671 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
672 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
673 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
674 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
675 } else {
676 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
677 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
678 HC_CONFIG_0_REG_INT_LINE_EN_0 |
679 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
680
681 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
682 val, port, addr);
683
684 REG_WR(bp, addr, val);
685
686 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
687 }
688
689 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
690 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
691
692 REG_WR(bp, addr, val);
693 /*
694 * Ensure that HC_CONFIG is written before leading/trailing edge config
695 */
696 mmiowb();
697 barrier();
698
699 if (CHIP_IS_E1H(bp)) {
700 /* init leading/trailing edge */
701 if (IS_E1HMF(bp)) {
702 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
703 if (bp->port.pmf)
704 /* enable nig and gpio3 attention */
705 val |= 0x1100;
706 } else
707 val = 0xffff;
708
709 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
710 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
711 }
712
713 /* Make sure that interrupts are indeed enabled from here on */
714 mmiowb();
715}
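/*
 * A rough summary of the HC programming above: MSI-X clears the single-ISR
 * and INTA enables and turns on only the MSI/MSI-X and attention bits; MSI
 * clears the INTA enable; legacy INTx first enables all four sources and
 * then drops the MSI/MSI-X bit for the final write.  On E1H the
 * leading/trailing edge masks are also opened for this vn (plus NIG and
 * GPIO3 attention when this function is the PMF).
 */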
716
717static void bnx2x_int_disable(struct bnx2x *bp)
718{
719 int port = BP_PORT(bp);
720 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
721 u32 val = REG_RD(bp, addr);
722
723 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
724 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
725 HC_CONFIG_0_REG_INT_LINE_EN_0 |
726 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
727
728 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
729 val, port, addr);
730
731 /* flush all outstanding writes */
732 mmiowb();
733
734 REG_WR(bp, addr, val);
735 if (REG_RD(bp, addr) != val)
736 BNX2X_ERR("BUG! proper val not read from IGU!\n");
737}
738
739static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
740{
741 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
742 int i, offset;
743
744 /* disable interrupt handling */
745 atomic_inc(&bp->intr_sem);
746 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
747
748 if (disable_hw)
749 /* prevent the HW from sending interrupts */
750 bnx2x_int_disable(bp);
751
752 /* make sure all ISRs are done */
753 if (msix) {
754 synchronize_irq(bp->msix_table[0].vector);
755 offset = 1;
756#ifdef BCM_CNIC
757 offset++;
758#endif
759 for_each_queue(bp, i)
760 synchronize_irq(bp->msix_table[i + offset].vector);
761 } else
762 synchronize_irq(bp->pdev->irq);
763
764 /* make sure sp_task is not running */
765 cancel_delayed_work(&bp->sp_task);
766 flush_workqueue(bnx2x_wq);
767}
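/*
 * The teardown order above is deliberate: intr_sem is raised first so any
 * ISR that still fires bails out early, the HC can then be masked, every
 * MSI-X vector (or the single INTx/MSI line) is synchronized so in-flight
 * handlers finish, and finally the slow-path work is cancelled and the
 * workqueue flushed.
 */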
768
769/* fast path */
770
771/*
772 * General service functions
773 */
774
775/* Return true if succeeded to acquire the lock */
776static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
777{
778 u32 lock_status;
779 u32 resource_bit = (1 << resource);
780 int func = BP_FUNC(bp);
781 u32 hw_lock_control_reg;
782
783 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
784
785 /* Validating that the resource is within range */
786 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
787 DP(NETIF_MSG_HW,
788 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
789 resource, HW_LOCK_MAX_RESOURCE_VALUE);
790		return false;
791 }
792
793 if (func <= 5)
794 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
795 else
796 hw_lock_control_reg =
797 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
798
799 /* Try to acquire the lock */
800 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
801 lock_status = REG_RD(bp, hw_lock_control_reg);
802 if (lock_status & resource_bit)
803 return true;
804
805 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
806 return false;
807}
808
809static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
810 u8 storm, u16 index, u8 op, u8 update)
811{
812 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
813 COMMAND_REG_INT_ACK);
814 struct igu_ack_register igu_ack;
815
816 igu_ack.status_block_index = index;
817 igu_ack.sb_id_and_flags =
818 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
819 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
820 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
821 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
822
823 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
824 (*(u32 *)&igu_ack), hc_addr);
825 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
826
827 /* Make sure that ACK is written */
828 mmiowb();
829 barrier();
830}
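/*
 * The ack above is a single 32-bit write: the status block index sits in
 * one half of igu_ack and the sb_id/storm/update/interrupt-mode flags are
 * packed into the other, posted to this port's HC command register at the
 * INT_ACK offset.
 */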
831
832static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
833{
834 struct host_status_block *fpsb = fp->status_blk;
835
836 barrier(); /* status block is written to by the chip */
837 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
838 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
839}
840
841static u16 bnx2x_ack_int(struct bnx2x *bp)
842{
843 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
844 COMMAND_REG_SIMD_MASK);
845 u32 result = REG_RD(bp, hc_addr);
846
847 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
848 result, hc_addr);
849
850 return result;
851}
852
853
854/*
855 * fast path service functions
856 */
857
858static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
859{
860 /* Tell compiler that consumer and producer can change */
861 barrier();
862 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
863}
864
865/* free skb in the packet ring at pos idx
866 * return idx of last bd freed
867 */
868static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
869 u16 idx)
870{
871 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
872 struct eth_tx_start_bd *tx_start_bd;
873 struct eth_tx_bd *tx_data_bd;
874 struct sk_buff *skb = tx_buf->skb;
875 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
876 int nbd;
877
878	/* prefetch skb end pointer to speed up dev_kfree_skb() */
879 prefetch(&skb->end);
880
881 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
882 idx, tx_buf, skb);
883
884 /* unmap first bd */
885 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
886 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
887 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
888			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
889
890 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
891#ifdef BNX2X_STOP_ON_ERROR
892 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
893 BNX2X_ERR("BAD nbd!\n");
894 bnx2x_panic();
895 }
896#endif
897 new_cons = nbd + tx_buf->first_bd;
898
899 /* Get the next bd */
900 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
901
902 /* Skip a parse bd... */
903 --nbd;
904 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
905
906 /* ...and the TSO split header bd since they have no mapping */
907 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
908 --nbd;
909 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
910 }
911
912 /* now free frags */
913 while (nbd > 0) {
914
915 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
916 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
917 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
918 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
919 if (--nbd)
920 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
921 }
922
923 /* release skb */
924 WARN_ON(!skb);
925 dev_kfree_skb(skb);
926 tx_buf->first_bd = 0;
927 tx_buf->skb = NULL;
928
929 return new_cons;
930}
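/*
 * BD accounting in the routine above, roughly: the nbd count taken from
 * the start BD covers the whole packet; the start BD is unmapped as a
 * single mapping, the parse BD and (for TSO) the split-header BD are
 * skipped since they carry no mapping, and the remaining data BDs are
 * unmapped as pages.  The value returned becomes the caller's new BD
 * consumer.
 */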
931
932static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
933{
934 s16 used;
935 u16 prod;
936 u16 cons;
937
938 prod = fp->tx_bd_prod;
939 cons = fp->tx_bd_cons;
940
941 /* NUM_TX_RINGS = number of "next-page" entries
942 It will be used as a threshold */
943 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
944
945#ifdef BNX2X_STOP_ON_ERROR
946 WARN_ON(used < 0);
947 WARN_ON(used > fp->bp->tx_ring_size);
948 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
949#endif
950
951 return (s16)(fp->bp->tx_ring_size) - used;
952}
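/*
 * For illustration: the "next page" BDs are treated as permanently used,
 * so with prod == cons this reports tx_ring_size - NUM_TX_RINGS free BDs,
 * and each queued packet shrinks that by its BD count until the transmit
 * path stops the queue.
 */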
953
954static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
955{
956 u16 hw_cons;
957
958 /* Tell compiler that status block fields can change */
959 barrier();
960 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
961 return hw_cons != fp->tx_pkt_cons;
962}
963
964static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
965{
966 struct bnx2x *bp = fp->bp;
967 struct netdev_queue *txq;
968 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
969
970#ifdef BNX2X_STOP_ON_ERROR
971 if (unlikely(bp->panic))
972 return -1;
973#endif
974
975 txq = netdev_get_tx_queue(bp->dev, fp->index);
976 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
977 sw_cons = fp->tx_pkt_cons;
978
979 while (sw_cons != hw_cons) {
980 u16 pkt_cons;
981
982 pkt_cons = TX_BD(sw_cons);
983
984 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
985
986 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
987 hw_cons, sw_cons, pkt_cons);
988
989/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
990 rmb();
991 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
992 }
993*/
994 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
995 sw_cons++;
996 }
997
998 fp->tx_pkt_cons = sw_cons;
999 fp->tx_bd_cons = bd_cons;
1000
1001 /* Need to make the tx_bd_cons update visible to start_xmit()
1002 * before checking for netif_tx_queue_stopped(). Without the
1003 * memory barrier, there is a small possibility that
1004 * start_xmit() will miss it and cause the queue to be stopped
1005 * forever.
1006 */
1007 smp_mb();
1008
1009 /* TBD need a thresh? */
1010 if (unlikely(netif_tx_queue_stopped(txq))) {
1011 /* Taking tx_lock() is needed to prevent reenabling the queue
1012	 * while it's empty. This could have happened if rx_action() gets
1013 * suspended in bnx2x_tx_int() after the condition before
1014 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
1015 *
1016 * stops the queue->sees fresh tx_bd_cons->releases the queue->
1017 * sends some packets consuming the whole queue again->
1018 * stops the queue
1019 */
1020
1021 __netif_tx_lock(txq, smp_processor_id());
1022
1023 if ((netif_tx_queue_stopped(txq)) &&
1024 (bp->state == BNX2X_STATE_OPEN) &&
1025 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
1026 netif_tx_wake_queue(txq);
1027
1028 __netif_tx_unlock(txq);
1029 }
1030 return 0;
1031}
1032
1033#ifdef BCM_CNIC
1034static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1035#endif
1036
1037static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1038 union eth_rx_cqe *rr_cqe)
1039{
1040 struct bnx2x *bp = fp->bp;
1041 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1042 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1043
1044 DP(BNX2X_MSG_SP,
1045 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1046 fp->index, cid, command, bp->state,
1047 rr_cqe->ramrod_cqe.ramrod_type);
1048
1049 bp->spq_left++;
1050
1051 if (fp->index) {
1052 switch (command | fp->state) {
1053 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1054 BNX2X_FP_STATE_OPENING):
1055 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1056 cid);
1057 fp->state = BNX2X_FP_STATE_OPEN;
1058 break;
1059
1060 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1061 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1062 cid);
1063 fp->state = BNX2X_FP_STATE_HALTED;
1064 break;
1065
1066 default:
1067 BNX2X_ERR("unexpected MC reply (%d) "
1068 "fp[%d] state is %x\n",
1069 command, fp->index, fp->state);
1070 break;
1071 }
1072 mb(); /* force bnx2x_wait_ramrod() to see the change */
1073 return;
1074 }
1075
1076 switch (command | bp->state) {
1077 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1078 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1079 bp->state = BNX2X_STATE_OPEN;
1080 break;
1081
1082 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1083 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1084 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1085 fp->state = BNX2X_FP_STATE_HALTED;
1086 break;
1087
1088 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1089 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1090 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1091 break;
1092
1093#ifdef BCM_CNIC
1094 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1095 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1096 bnx2x_cnic_cfc_comp(bp, cid);
1097 break;
1098#endif
1099
1100 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1101 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1102 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1103 bp->set_mac_pending--;
1104 smp_wmb();
1105 break;
1106
1107 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1108 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1109 bp->set_mac_pending--;
1110 smp_wmb();
1111 break;
1112
1113 default:
1114 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1115 command, bp->state);
1116 break;
1117 }
1118 mb(); /* force bnx2x_wait_ramrod() to see the change */
1119}
1120
1121static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1122 struct bnx2x_fastpath *fp, u16 index)
1123{
1124 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1125 struct page *page = sw_buf->page;
1126 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1127
1128 /* Skip "next page" elements */
1129 if (!page)
1130 return;
1131
1132 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
1133		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1134 __free_pages(page, PAGES_PER_SGE_SHIFT);
1135
1136 sw_buf->page = NULL;
1137 sge->addr_hi = 0;
1138 sge->addr_lo = 0;
1139}
1140
1141static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1142 struct bnx2x_fastpath *fp, int last)
1143{
1144 int i;
1145
1146 for (i = 0; i < last; i++)
1147 bnx2x_free_rx_sge(bp, fp, i);
1148}
1149
1150static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1151 struct bnx2x_fastpath *fp, u16 index)
1152{
1153 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1154 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1155 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1156 dma_addr_t mapping;
1157
1158 if (unlikely(page == NULL))
1159 return -ENOMEM;
1160
1161 mapping = dma_map_page(&bp->pdev->dev, page, 0,
1162 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1163 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1164 __free_pages(page, PAGES_PER_SGE_SHIFT);
1165 return -ENOMEM;
1166 }
1167
1168 sw_buf->page = page;
1169 dma_unmap_addr_set(sw_buf, mapping, mapping);
1170
1171 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1172 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1173
1174 return 0;
1175}
1176
1177static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1178 struct bnx2x_fastpath *fp, u16 index)
1179{
1180 struct sk_buff *skb;
1181 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1182 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1183 dma_addr_t mapping;
1184
1185 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1186 if (unlikely(skb == NULL))
1187 return -ENOMEM;
1188
1189 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1190 DMA_FROM_DEVICE);
1191 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1192 dev_kfree_skb(skb);
1193 return -ENOMEM;
1194 }
1195
1196 rx_buf->skb = skb;
1197 dma_unmap_addr_set(rx_buf, mapping, mapping);
1198
1199 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1200 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1201
1202 return 0;
1203}
1204
1205/* note that we are not allocating a new skb,
1206 * we are just moving one from cons to prod;
1207 * we are not creating a new mapping,
1208 * so there is no need to check for dma_mapping_error().
1209 */
1210static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1211 struct sk_buff *skb, u16 cons, u16 prod)
1212{
1213 struct bnx2x *bp = fp->bp;
1214 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1215 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1216 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1217 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1218
1219 dma_sync_single_for_device(&bp->pdev->dev,
1220 dma_unmap_addr(cons_rx_buf, mapping),
1221 RX_COPY_THRESH, DMA_FROM_DEVICE);
1222
1223 prod_rx_buf->skb = cons_rx_buf->skb;
1224 dma_unmap_addr_set(prod_rx_buf, mapping,
1225 dma_unmap_addr(cons_rx_buf, mapping));
1226 *prod_bd = *cons_bd;
1227}
1228
1229static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1230 u16 idx)
1231{
1232 u16 last_max = fp->last_max_sge;
1233
1234 if (SUB_S16(idx, last_max) > 0)
1235 fp->last_max_sge = idx;
1236}
1237
1238static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1239{
1240 int i, j;
1241
1242 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1243 int idx = RX_SGE_CNT * i - 1;
1244
1245 for (j = 0; j < 2; j++) {
1246 SGE_MASK_CLEAR_BIT(fp, idx);
1247 idx--;
1248 }
1249 }
1250}
1251
1252static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1253 struct eth_fast_path_rx_cqe *fp_cqe)
1254{
1255 struct bnx2x *bp = fp->bp;
1256 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1257 le16_to_cpu(fp_cqe->len_on_bd)) >>
1258 SGE_PAGE_SHIFT;
1259 u16 last_max, last_elem, first_elem;
1260 u16 delta = 0;
1261 u16 i;
1262
1263 if (!sge_len)
1264 return;
1265
1266 /* First mark all used pages */
1267 for (i = 0; i < sge_len; i++)
1268 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1269
1270 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1271 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1272
1273 /* Here we assume that the last SGE index is the biggest */
1274 prefetch((void *)(fp->sge_mask));
1275 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1276
1277 last_max = RX_SGE(fp->last_max_sge);
1278 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1279 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1280
1281 /* If ring is not full */
1282 if (last_elem + 1 != first_elem)
1283 last_elem++;
1284
1285 /* Now update the prod */
1286 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1287 if (likely(fp->sge_mask[i]))
1288 break;
1289
1290 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1291 delta += RX_SGE_MASK_ELEM_SZ;
1292 }
1293
1294 if (delta > 0) {
1295 fp->rx_sge_prod += delta;
1296 /* clear page-end entries */
1297 bnx2x_clear_sge_mask_next_elems(fp);
1298 }
1299
1300 DP(NETIF_MSG_RX_STATUS,
1301 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1302 fp->last_max_sge, fp->rx_sge_prod);
1303}
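/*
 * The sge_mask is the bookkeeping behind the loop above: a set bit means
 * that SGE has not been consumed since the producer last passed it, and
 * bits are cleared as TPA aggregations use up pages.  Once a whole 64-bit
 * mask element drops to zero, all of its SGEs have been consumed (and
 * refilled via bnx2x_alloc_rx_sge()), so rx_sge_prod jumps over the entire
 * element and the element is re-armed to all ones.
 */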
1304
1305static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1306{
1307 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1308 memset(fp->sge_mask, 0xff,
1309 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1310
1311	/* Clear the last two indices in each page:
1312	   these are the indices that correspond to the "next" element,
1313	   hence will never be indicated by the FW and should be removed
1314	   from the calculations. */
1315 bnx2x_clear_sge_mask_next_elems(fp);
1316}
1317
1318static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1319 struct sk_buff *skb, u16 cons, u16 prod)
1320{
1321 struct bnx2x *bp = fp->bp;
1322 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1323 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1324 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1325 dma_addr_t mapping;
1326
1327 /* move empty skb from pool to prod and map it */
1328 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1329 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
1330 bp->rx_buf_size, DMA_FROM_DEVICE);
1331 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
1332
1333 /* move partial skb from cons to pool (don't unmap yet) */
1334 fp->tpa_pool[queue] = *cons_rx_buf;
1335
1336 /* mark bin state as start - print error if current state != stop */
1337 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1338 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1339
1340 fp->tpa_state[queue] = BNX2X_TPA_START;
1341
1342 /* point prod_bd to new skb */
1343 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1344 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1345
1346#ifdef BNX2X_STOP_ON_ERROR
1347 fp->tpa_queue_used |= (1 << queue);
1348#ifdef _ASM_GENERIC_INT_L64_H
1349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1350#else
1351 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1352#endif
1353 fp->tpa_queue_used);
1354#endif
1355}
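/*
 * In short, TPA start swaps buffers: the spare skb held in tpa_pool[queue]
 * is mapped and published at the producer so the BD ring never runs dry,
 * while the just-received buffer is parked (still mapped) in the pool and
 * is only unmapped and completed later, in bnx2x_tpa_stop(), when the
 * aggregation ends.
 */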
1356
1357static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1358 struct sk_buff *skb,
1359 struct eth_fast_path_rx_cqe *fp_cqe,
1360 u16 cqe_idx)
1361{
1362 struct sw_rx_page *rx_pg, old_rx_pg;
1363 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1364 u32 i, frag_len, frag_size, pages;
1365 int err;
1366 int j;
1367
1368 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1369 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1370
1371 /* This is needed in order to enable forwarding support */
1372 if (frag_size)
1373 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1374 max(frag_size, (u32)len_on_bd));
1375
1376#ifdef BNX2X_STOP_ON_ERROR
1377 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
1378 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1379 pages, cqe_idx);
1380 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1381 fp_cqe->pkt_len, len_on_bd);
1382 bnx2x_panic();
1383 return -EINVAL;
1384 }
1385#endif
1386
1387 /* Run through the SGL and compose the fragmented skb */
1388 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1389 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1390
1391 /* FW gives the indices of the SGE as if the ring is an array
1392 (meaning that "next" element will consume 2 indices) */
1393 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1394 rx_pg = &fp->rx_page_ring[sge_idx];
1395 old_rx_pg = *rx_pg;
1396
1397 /* If we fail to allocate a substitute page, we simply stop
1398 where we are and drop the whole packet */
1399 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1400 if (unlikely(err)) {
1401 fp->eth_q_stats.rx_skb_alloc_failed++;
1402 return err;
1403 }
1404
1405		/* Unmap the page as we are going to pass it to the stack */
1406 dma_unmap_page(&bp->pdev->dev,
1407 dma_unmap_addr(&old_rx_pg, mapping),
1408 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1409
1410 /* Add one frag and update the appropriate fields in the skb */
1411 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1412
1413 skb->data_len += frag_len;
1414 skb->truesize += frag_len;
1415 skb->len += frag_len;
1416
1417 frag_size -= frag_len;
1418 }
1419
1420 return 0;
1421}
1422
1423static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1425 u16 cqe_idx)
1426{
1427 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428 struct sk_buff *skb = rx_buf->skb;
1429 /* alloc new skb */
1430 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1431
1432 /* Unmap skb in the pool anyway, as we are going to change
1433 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1434 fails. */
1435 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436 bp->rx_buf_size, DMA_FROM_DEVICE);
1437
1438 if (likely(new_skb)) {
1439 /* fix ip xsum and give it to the stack */
1440 /* (no need to map the new skb) */
1441#ifdef BCM_VLAN
1442 int is_vlan_cqe =
1443 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444 PARSING_FLAGS_VLAN);
1445 int is_not_hwaccel_vlan_cqe =
1446 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1447#endif
1448
1449 prefetch(skb);
1450 prefetch(((char *)(skb)) + 128);
1451
1452#ifdef BNX2X_STOP_ON_ERROR
1453 if (pad + len > bp->rx_buf_size) {
1454 BNX2X_ERR("skb_put is about to fail... "
1455 "pad %d len %d rx_buf_size %d\n",
1456 pad, len, bp->rx_buf_size);
1457 bnx2x_panic();
1458 return;
1459 }
1460#endif
1461
1462 skb_reserve(skb, pad);
1463 skb_put(skb, len);
1464
1465 skb->protocol = eth_type_trans(skb, bp->dev);
1466 skb->ip_summed = CHECKSUM_UNNECESSARY;
1467
1468 {
1469 struct iphdr *iph;
1470
1471 iph = (struct iphdr *)skb->data;
1472#ifdef BCM_VLAN
1473 /* If there is no Rx VLAN offloading -
1474			   take the VLAN tag into account */
1475 if (unlikely(is_not_hwaccel_vlan_cqe))
1476 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1477#endif
1478 iph->check = 0;
1479 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1480 }
1481
1482 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483 &cqe->fast_path_cqe, cqe_idx)) {
1484#ifdef BCM_VLAN
1485 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486 (!is_not_hwaccel_vlan_cqe))
1487 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488 le16_to_cpu(cqe->fast_path_cqe.
1489 vlan_tag), skb);
1490 else
1491#endif
1492 napi_gro_receive(&fp->napi, skb);
1493 } else {
1494 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495 " - dropping packet!\n");
1496 dev_kfree_skb(skb);
1497 }
1498
1499
1500 /* put new skb in bin */
1501 fp->tpa_pool[queue].skb = new_skb;
1502
1503 } else {
1504 /* else drop the packet and keep the buffer in the bin */
1505 DP(NETIF_MSG_RX_STATUS,
1506 "Failed to allocate new skb - dropping packet!\n");
1507 fp->eth_q_stats.rx_skb_alloc_failed++;
1508 }
1509
1510 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1511}
1512
1513static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514 struct bnx2x_fastpath *fp,
1515 u16 bd_prod, u16 rx_comp_prod,
1516 u16 rx_sge_prod)
1517{
1518 struct ustorm_eth_rx_producers rx_prods = {0};
1519 int i;
1520
1521 /* Update producers */
1522 rx_prods.bd_prod = bd_prod;
1523 rx_prods.cqe_prod = rx_comp_prod;
1524 rx_prods.sge_prod = rx_sge_prod;
1525
1526 /*
1527 * Make sure that the BD and SGE data is updated before updating the
1528 * producers since FW might read the BD/SGE right after the producer
1529 * is updated.
1530 * This is only applicable for weak-ordered memory model archs such
1531	 * as IA-64. The following barrier is also mandatory since the FW
1532	 * assumes BDs always have buffers.
1533 */
1534 wmb();
1535
1536 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537 REG_WR(bp, BAR_USTRORM_INTMEM +
1538 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1539 ((u32 *)&rx_prods)[i]);
1540
1541 mmiowb(); /* keep prod updates ordered */
1542
1543 DP(NETIF_MSG_RX_STATUS,
1544 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1545 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1546}
1547
1548/* Set Toeplitz hash value in the skb using the value from the
1549 * CQE (calculated by HW).
1550 */
1551static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
1552 struct sk_buff *skb)
1553{
1554 /* Set Toeplitz hash from CQE */
1555 if ((bp->dev->features & NETIF_F_RXHASH) &&
1556 (cqe->fast_path_cqe.status_flags &
1557 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1558 skb->rxhash =
1559 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
1560}
1561
1562static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1563{
1564 struct bnx2x *bp = fp->bp;
1565 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1566 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1567 int rx_pkt = 0;
1568
1569#ifdef BNX2X_STOP_ON_ERROR
1570 if (unlikely(bp->panic))
1571 return 0;
1572#endif
1573
1574	/* CQ "next element" is the same size as a regular element,
1575 that's why it's ok here */
1576 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1577 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1578 hw_comp_cons++;
1579
1580 bd_cons = fp->rx_bd_cons;
1581 bd_prod = fp->rx_bd_prod;
1582 bd_prod_fw = bd_prod;
1583 sw_comp_cons = fp->rx_comp_cons;
1584 sw_comp_prod = fp->rx_comp_prod;
1585
1586 /* Memory barrier necessary as speculative reads of the rx
1587 * buffer can be ahead of the index in the status block
1588 */
1589 rmb();
1590
1591 DP(NETIF_MSG_RX_STATUS,
1592 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1593 fp->index, hw_comp_cons, sw_comp_cons);
1594
1595 while (sw_comp_cons != hw_comp_cons) {
1596 struct sw_rx_bd *rx_buf = NULL;
1597 struct sk_buff *skb;
1598 union eth_rx_cqe *cqe;
1599 u8 cqe_fp_flags;
1600 u16 len, pad;
1601
1602 comp_ring_cons = RCQ_BD(sw_comp_cons);
1603 bd_prod = RX_BD(bd_prod);
1604 bd_cons = RX_BD(bd_cons);
1605
1606 /* Prefetch the page containing the BD descriptor
1607 at producer's index. It will be needed when new skb is
1608 allocated */
1609 prefetch((void *)(PAGE_ALIGN((unsigned long)
1610 (&fp->rx_desc_ring[bd_prod])) -
1611 PAGE_SIZE + 1));
1612
1613 cqe = &fp->rx_comp_ring[comp_ring_cons];
1614 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1615
1616 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1617 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1618 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1619 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1620 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1621 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1622
1623 /* is this a slowpath msg? */
1624 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1625 bnx2x_sp_event(fp, cqe);
1626 goto next_cqe;
1627
1628 /* this is an rx packet */
1629 } else {
1630 rx_buf = &fp->rx_buf_ring[bd_cons];
1631 skb = rx_buf->skb;
1632 prefetch(skb);
1633 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1634 pad = cqe->fast_path_cqe.placement_offset;
1635
1636 /* If CQE is marked both TPA_START and TPA_END
1637 it is a non-TPA CQE */
1638 if ((!fp->disable_tpa) &&
1639 (TPA_TYPE(cqe_fp_flags) !=
1640 (TPA_TYPE_START | TPA_TYPE_END))) {
1641 u16 queue = cqe->fast_path_cqe.queue_index;
1642
1643 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1644 DP(NETIF_MSG_RX_STATUS,
1645 "calling tpa_start on queue %d\n",
1646 queue);
1647
1648 bnx2x_tpa_start(fp, queue, skb,
1649 bd_cons, bd_prod);
1650
1651 /* Set Toeplitz hash for an LRO skb */
1652 bnx2x_set_skb_rxhash(bp, cqe, skb);
1653
1654 goto next_rx;
1655 }
1656
1657 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1658 DP(NETIF_MSG_RX_STATUS,
1659 "calling tpa_stop on queue %d\n",
1660 queue);
1661
1662 if (!BNX2X_RX_SUM_FIX(cqe))
1663					BNX2X_ERR("STOP on non-TCP "
1664 "data\n");
1665
1666				/* This is the size of the linear data
1667 on this skb */
1668 len = le16_to_cpu(cqe->fast_path_cqe.
1669 len_on_bd);
1670 bnx2x_tpa_stop(bp, fp, queue, pad,
1671 len, cqe, comp_ring_cons);
1672#ifdef BNX2X_STOP_ON_ERROR
1673 if (bp->panic)
1674 return 0;
1675#endif
1676
1677 bnx2x_update_sge_prod(fp,
1678 &cqe->fast_path_cqe);
1679 goto next_cqe;
1680 }
1681 }
1682
1683 dma_sync_single_for_device(&bp->pdev->dev,
1684 dma_unmap_addr(rx_buf, mapping),
1685 pad + RX_COPY_THRESH,
1686 DMA_FROM_DEVICE);
1687 prefetch(((char *)(skb)) + 128);
1688
1689 /* is this an error packet? */
1690 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1691 DP(NETIF_MSG_RX_ERR,
1692 "ERROR flags %x rx packet %u\n",
1693 cqe_fp_flags, sw_comp_cons);
1694 fp->eth_q_stats.rx_err_discard_pkt++;
1695 goto reuse_rx;
1696 }
1697
1698 /* Since we don't have a jumbo ring
1699 * copy small packets if mtu > 1500
1700 */
1701 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1702 (len <= RX_COPY_THRESH)) {
1703 struct sk_buff *new_skb;
1704
1705 new_skb = netdev_alloc_skb(bp->dev,
1706 len + pad);
1707 if (new_skb == NULL) {
1708 DP(NETIF_MSG_RX_ERR,
1709 "ERROR packet dropped "
1710 "because of alloc failure\n");
1711 fp->eth_q_stats.rx_skb_alloc_failed++;
1712 goto reuse_rx;
1713 }
1714
1715 /* aligned copy */
1716 skb_copy_from_linear_data_offset(skb, pad,
1717 new_skb->data + pad, len);
1718 skb_reserve(new_skb, pad);
1719 skb_put(new_skb, len);
1720
1721 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1722
1723 skb = new_skb;
1724
1725 } else
1726 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1727 dma_unmap_single(&bp->pdev->dev,
1728 dma_unmap_addr(rx_buf, mapping),
1729 bp->rx_buf_size,
1730 DMA_FROM_DEVICE);
1731 skb_reserve(skb, pad);
1732 skb_put(skb, len);
1733
1734 } else {
1735 DP(NETIF_MSG_RX_ERR,
1736 "ERROR packet dropped because "
1737 "of alloc failure\n");
1738 fp->eth_q_stats.rx_skb_alloc_failed++;
1739reuse_rx:
1740 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1741 goto next_rx;
1742 }
1743
1744 skb->protocol = eth_type_trans(skb, bp->dev);
1745
1746			/* Set Toeplitz hash for a non-LRO skb */
1747 bnx2x_set_skb_rxhash(bp, cqe, skb);
1748
1749 skb->ip_summed = CHECKSUM_NONE;
1750 if (bp->rx_csum) {
1751 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1752 skb->ip_summed = CHECKSUM_UNNECESSARY;
1753 else
1754 fp->eth_q_stats.hw_csum_err++;
1755 }
1756 }
1757
1758 skb_record_rx_queue(skb, fp->index);
1759
1760#ifdef BCM_VLAN
1761 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1762 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1763 PARSING_FLAGS_VLAN))
1764 vlan_gro_receive(&fp->napi, bp->vlgrp,
1765 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1766 else
1767#endif
1768 napi_gro_receive(&fp->napi, skb);
1769
1770
1771next_rx:
1772 rx_buf->skb = NULL;
1773
1774 bd_cons = NEXT_RX_IDX(bd_cons);
1775 bd_prod = NEXT_RX_IDX(bd_prod);
1776 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1777 rx_pkt++;
1778next_cqe:
1779 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1780 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1781
1782 if (rx_pkt == budget)
1783 break;
1784 } /* while */
1785
1786 fp->rx_bd_cons = bd_cons;
1787 fp->rx_bd_prod = bd_prod_fw;
1788 fp->rx_comp_cons = sw_comp_cons;
1789 fp->rx_comp_prod = sw_comp_prod;
1790
1791 /* Update producers */
1792 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1793 fp->rx_sge_prod);
1794
1795 fp->rx_pkt += rx_pkt;
1796 fp->rx_calls++;
1797
1798 return rx_pkt;
1799}
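/*
 * The completion loop above handles, in order: slow-path CQEs (passed to
 * bnx2x_sp_event()), TPA start/end CQEs when TPA is enabled, error CQEs
 * (the buffer is recycled via reuse_rx), small frames on jumbo MTUs
 * (copied into a fresh skb so the large buffer can be reused), and the
 * normal case where the skb goes up the stack and a replacement is
 * allocated at the producer.  The Rx producers are pushed to the FW only
 * once per poll, after the loop.
 */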
1800
1801static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1802{
1803 struct bnx2x_fastpath *fp = fp_cookie;
1804 struct bnx2x *bp = fp->bp;
1805
1806 /* Return here if interrupt is disabled */
1807 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1808 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1809 return IRQ_HANDLED;
1810 }
1811
1812 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1813 fp->index, fp->sb_id);
1814 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1815
1816#ifdef BNX2X_STOP_ON_ERROR
1817 if (unlikely(bp->panic))
1818 return IRQ_HANDLED;
1819#endif
1820
1821 /* Handle Rx and Tx according to MSI-X vector */
1822 prefetch(fp->rx_cons_sb);
1823 prefetch(fp->tx_cons_sb);
1824 prefetch(&fp->status_blk->u_status_block.status_block_index);
1825 prefetch(&fp->status_blk->c_status_block.status_block_index);
1826 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1827
1828 return IRQ_HANDLED;
1829}
1830
1831static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1832{
1833 struct bnx2x *bp = netdev_priv(dev_instance);
1834 u16 status = bnx2x_ack_int(bp);
1835 u16 mask;
1836 int i;
1837
1838 /* Return here if interrupt is shared and it's not for us */
1839 if (unlikely(status == 0)) {
1840 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1841 return IRQ_NONE;
1842 }
1843 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1844
1845 /* Return here if interrupt is disabled */
1846 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1847 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1848 return IRQ_HANDLED;
1849 }
1850
1851#ifdef BNX2X_STOP_ON_ERROR
1852 if (unlikely(bp->panic))
1853 return IRQ_HANDLED;
1854#endif
1855
1856 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1857 struct bnx2x_fastpath *fp = &bp->fp[i];
1858
1859 mask = 0x2 << fp->sb_id;
1860 if (status & mask) {
1861 /* Handle Rx and Tx according to SB id */
1862 prefetch(fp->rx_cons_sb);
1863 prefetch(&fp->status_blk->u_status_block.
1864 status_block_index);
1865 prefetch(fp->tx_cons_sb);
1866 prefetch(&fp->status_blk->c_status_block.
1867 status_block_index);
1868 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1869 status &= ~mask;
1870 }
1871 }
1872
1873#ifdef BCM_CNIC
1874 mask = 0x2 << CNIC_SB_ID(bp);
1875 if (status & (mask | 0x1)) {
1876 struct cnic_ops *c_ops = NULL;
1877
1878 rcu_read_lock();
1879 c_ops = rcu_dereference(bp->cnic_ops);
1880 if (c_ops)
1881 c_ops->cnic_handler(bp->cnic_data, NULL);
1882 rcu_read_unlock();
1883
1884 status &= ~mask;
1885 }
1886#endif
1887
1888 if (unlikely(status & 0x1)) {
1889 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1890
1891 status &= ~0x1;
1892 if (!status)
1893 return IRQ_HANDLED;
1894 }
1895
1896 if (unlikely(status))
1897 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1898 status);
1899
1900 return IRQ_HANDLED;
1901}
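/*
 * Decoding of the acked status word above: bit 0 belongs to the
 * default/slow-path status block and schedules sp_task, while each
 * fast-path status block owns bit (sb_id + 1) - hence the "0x2 << sb_id"
 * masks - and the CNIC status block is checked the same way when BCM_CNIC
 * is built in.  Anything still set afterwards is reported as an unknown
 * interrupt.
 */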
1902
1903/* end of fast path */
1904
1905static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1906
1907/* Link */
1908
1909/*
1910 * General service functions
1911 */
1912
1913static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1914{
1915 u32 lock_status;
1916 u32 resource_bit = (1 << resource);
1917 int func = BP_FUNC(bp);
1918 u32 hw_lock_control_reg;
1919 int cnt;
1920
1921 /* Validating that the resource is within range */
1922 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1923 DP(NETIF_MSG_HW,
1924 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1925 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1926 return -EINVAL;
1927 }
1928
1929 if (func <= 5) {
1930 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1931 } else {
1932 hw_lock_control_reg =
1933 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1934 }
1935
1936 /* Validating that the resource is not already taken */
1937 lock_status = REG_RD(bp, hw_lock_control_reg);
1938 if (lock_status & resource_bit) {
1939 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1940 lock_status, resource_bit);
1941 return -EEXIST;
1942 }
1943
1944	/* Try for 5 seconds every 5ms */
1945 for (cnt = 0; cnt < 1000; cnt++) {
1946 /* Try to acquire the lock */
1947 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1948 lock_status = REG_RD(bp, hw_lock_control_reg);
1949 if (lock_status & resource_bit)
1950 return 0;
1951
1952 msleep(5);
1953 }
1954 DP(NETIF_MSG_HW, "Timeout\n");
1955 return -EAGAIN;
1956}
1957
1958static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1959{
1960 u32 lock_status;
1961 u32 resource_bit = (1 << resource);
1962 int func = BP_FUNC(bp);
1963 u32 hw_lock_control_reg;
1964
1965 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1966
1967 /* Validating that the resource is within range */
1968 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1969 DP(NETIF_MSG_HW,
1970 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1971 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1972 return -EINVAL;
1973 }
1974
1975 if (func <= 5) {
1976 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1977 } else {
1978 hw_lock_control_reg =
1979 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1980 }
1981
1982 /* Validating that the resource is currently taken */
1983 lock_status = REG_RD(bp, hw_lock_control_reg);
1984 if (!(lock_status & resource_bit)) {
1985 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1986 lock_status, resource_bit);
1987 return -EFAULT;
1988 }
1989
1990 REG_WR(bp, hw_lock_control_reg, resource_bit);
1991 return 0;
1992}
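/*
 * The MISC driver-control registers used by the two routines above work as
 * set/clear pairs: writing the resource bit at hw_lock_control_reg + 4
 * attempts to take the lock and a read-back with the bit set confirms
 * ownership, while writing the same bit to the base register releases it.
 * Functions 0-5 index off MISC_REG_DRIVER_CONTROL_1 and functions 6-7 off
 * MISC_REG_DRIVER_CONTROL_7, 8 bytes per function.
 */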
1993
1994/* HW Lock for shared dual port PHYs */
1995static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1996{
1997 mutex_lock(&bp->port.phy_mutex);
1998
1999 if (bp->port.need_hw_lock)
2000 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
2001}
2002
2003static void bnx2x_release_phy_lock(struct bnx2x *bp)
2004{
2005 if (bp->port.need_hw_lock)
2006 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
2007
2008 mutex_unlock(&bp->port.phy_mutex);
2009}
2010
2011int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2012{
2013 /* The GPIO should be swapped if swap register is set and active */
2014 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2015 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2016 int gpio_shift = gpio_num +
2017 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2018 u32 gpio_mask = (1 << gpio_shift);
2019 u32 gpio_reg;
2020 int value;
2021
2022 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2023 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2024 return -EINVAL;
2025 }
2026
2027 /* read GPIO value */
2028 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2029
2030 /* get the requested pin value */
2031 if ((gpio_reg & gpio_mask) == gpio_mask)
2032 value = 1;
2033 else
2034 value = 0;
2035
2036 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2037
2038 return value;
2039}
2040
2041int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2042{
2043 /* The GPIO should be swapped if swap register is set and active */
2044 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2045 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2046 int gpio_shift = gpio_num +
2047 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2048 u32 gpio_mask = (1 << gpio_shift);
2049 u32 gpio_reg;
2050
2051 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2052 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2053 return -EINVAL;
2054 }
2055
2056 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2057 /* read GPIO and mask except the float bits */
2058 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2059
2060 switch (mode) {
2061 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2062 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2063 gpio_num, gpio_shift);
2064 /* clear FLOAT and set CLR */
2065 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2066 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2067 break;
2068
2069 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2070 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2071 gpio_num, gpio_shift);
2072 /* clear FLOAT and set SET */
2073 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2074 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2075 break;
2076
2077 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2078 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2079 gpio_num, gpio_shift);
2080 /* set FLOAT */
2081 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2082 break;
2083
2084 default:
2085 break;
2086 }
2087
2088 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2089 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2090
2091 return 0;
2092}
2093
2094int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2095{
2096 /* The GPIO should be swapped if swap register is set and active */
2097 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2098 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2099 int gpio_shift = gpio_num +
2100 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2101 u32 gpio_mask = (1 << gpio_shift);
2102 u32 gpio_reg;
2103
2104 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2105 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2106 return -EINVAL;
2107 }
2108
2109 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2110 /* read GPIO int */
2111 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2112
2113 switch (mode) {
2114 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2115 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2116 "output low\n", gpio_num, gpio_shift);
2117 /* clear SET and set CLR */
2118 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2119 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2120 break;
2121
2122 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2123 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2124 "output high\n", gpio_num, gpio_shift);
2125 /* clear CLR and set SET */
2126 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2127 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2128 break;
2129
2130 default:
2131 break;
2132 }
2133
2134 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2135 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2136
2137 return 0;
2138}
2139
2140static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2141{
2142 u32 spio_mask = (1 << spio_num);
2143 u32 spio_reg;
2144
2145 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2146 (spio_num > MISC_REGISTERS_SPIO_7)) {
2147 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2148 return -EINVAL;
2149 }
2150
2151 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2152 /* read SPIO and mask except the float bits */
2153 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2154
2155 switch (mode) {
2156 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2157 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2158 /* clear FLOAT and set CLR */
2159 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2160 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2161 break;
2162
2163 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2164 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2165 /* clear FLOAT and set SET */
2166 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2167 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2168 break;
2169
2170 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2171 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2172 /* set FLOAT */
2173 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2174 break;
2175
2176 default:
2177 break;
2178 }
2179
2180 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2181 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2182
2183 return 0;
2184}
2185
2186static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2187{
2188 switch (bp->link_vars.ieee_fc &
2189 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2190 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2191 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2192 ADVERTISED_Pause);
2193 break;
2194
2195 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2196 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2197 ADVERTISED_Pause);
2198 break;
2199
2200 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2201 bp->port.advertising |= ADVERTISED_Asym_Pause;
2202 break;
2203
2204 default:
2205 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2206 ADVERTISED_Pause);
2207 break;
2208 }
2209}
2210
2211static void bnx2x_link_report(struct bnx2x *bp)
2212{
2213 if (bp->flags & MF_FUNC_DIS) {
2214 netif_carrier_off(bp->dev);
2215 netdev_err(bp->dev, "NIC Link is Down\n");
2216 return;
2217 }
2218
2219 if (bp->link_vars.link_up) {
2220 u16 line_speed;
2221
2222 if (bp->state == BNX2X_STATE_OPEN)
2223 netif_carrier_on(bp->dev);
2224 netdev_info(bp->dev, "NIC Link is Up, ");
2225
2226 line_speed = bp->link_vars.line_speed;
2227 if (IS_E1HMF(bp)) {
2228 u16 vn_max_rate;
2229
2230 vn_max_rate =
2231 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2232 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2233 if (vn_max_rate < line_speed)
2234 line_speed = vn_max_rate;
2235 }
2236 pr_cont("%d Mbps ", line_speed);
2237
2238 if (bp->link_vars.duplex == DUPLEX_FULL)
2239 pr_cont("full duplex");
2240 else
2241 pr_cont("half duplex");
2242
2243 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2244 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2245 pr_cont(", receive ");
2246 if (bp->link_vars.flow_ctrl &
2247 BNX2X_FLOW_CTRL_TX)
2248 pr_cont("& transmit ");
2249 } else {
2250 pr_cont(", transmit ");
2251 }
2252 pr_cont("flow control ON");
2253 }
2254 pr_cont("\n");
2255
2256 } else { /* link_down */
2257 netif_carrier_off(bp->dev);
2258 netdev_err(bp->dev, "NIC Link is Down\n");
2259 }
2260}
2261
2262static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2263{
2264 if (!BP_NOMCP(bp)) {
2265 u8 rc;
2266
2267 /* Initialize link parameters structure variables */
2268 /* It is recommended to turn off RX FC for jumbo frames
2269 for better performance */
2270 if (bp->dev->mtu > 5000)
2271 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2272 else
2273 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2274
2275 bnx2x_acquire_phy_lock(bp);
2276
2277 if (load_mode == LOAD_DIAG)
2278 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2279
2280 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2281
2282 bnx2x_release_phy_lock(bp);
2283
2284 bnx2x_calc_fc_adv(bp);
2285
2286 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2287 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2288 bnx2x_link_report(bp);
2289 }
2290
2291 return rc;
2292 }
2293 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2294 return -EINVAL;
2295}
2296
2297static void bnx2x_link_set(struct bnx2x *bp)
2298{
2299 if (!BP_NOMCP(bp)) {
2300 bnx2x_acquire_phy_lock(bp);
2301 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2302 bnx2x_release_phy_lock(bp);
2303
2304 bnx2x_calc_fc_adv(bp);
2305 } else
2306 BNX2X_ERR("Bootcode is missing - can not set link\n");
2307}
2308
2309static void bnx2x__link_reset(struct bnx2x *bp)
2310{
2311 if (!BP_NOMCP(bp)) {
2312 bnx2x_acquire_phy_lock(bp);
2313 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2314 bnx2x_release_phy_lock(bp);
2315 } else
2316 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2317}
2318
2319static u8 bnx2x_link_test(struct bnx2x *bp)
2320{
2321 u8 rc = 0;
2322
2323 if (!BP_NOMCP(bp)) {
2324 bnx2x_acquire_phy_lock(bp);
2325 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2326 bnx2x_release_phy_lock(bp);
2327 } else
2328 BNX2X_ERR("Bootcode is missing - can not test link\n");
2329
2330 return rc;
2331}
2332
2333static void bnx2x_init_port_minmax(struct bnx2x *bp)
2334{
2335 u32 r_param = bp->link_vars.line_speed / 8;
2336 u32 fair_periodic_timeout_usec;
2337 u32 t_fair;
2338
2339 memset(&(bp->cmng.rs_vars), 0,
2340 sizeof(struct rate_shaping_vars_per_port));
2341 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2342
2343 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2344 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2345
2346	/* this is the threshold below which no timer arming will occur.
2347	   The 1.25 coefficient makes the threshold a little bigger
2348	   than the real time, to compensate for timer inaccuracy */
2349 bp->cmng.rs_vars.rs_threshold =
2350 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2351
2352 /* resolution of fairness timer */
2353 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2354 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2355 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2356
2357 /* this is the threshold below which we won't arm the timer anymore */
2358 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2359
2360 /* we multiply by 1e3/8 to get bytes/msec.
2361	   We don't want the credit to build up beyond
2362	   t_fair*FAIR_MEM worth of bytes (the algorithm resolution) */
2363 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2364 /* since each tick is 4 usec */
2365 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2366}
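/*
 * Illustrative numbers, assuming line_speed is in Mbps as the 10G/1G
 * comment above implies and RS_PERIODIC_TIMEOUT_USEC is the 100 usec
 * mentioned above: on a 10G link r_param = 10000 / 8 = 1250 bytes/usec,
 * so the rate-shaping threshold works out to 100 * 1250 * 5 / 4 = 156250
 * bytes, and t_fair is 1000 usec (10000 usec on a 1G link).
 */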
2367
2368/* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
2369   It's needed for further normalizing of the min_rates.
2370   The result is:
2371     the sum of vn_min_rates,
2372       or
2373     0 - if all the min_rates are 0.
2374     In the latter case the fairness algorithm should be deactivated.
2375     If not all min_rates are zero then those that are zero will be set to 1.
2376 */
2377static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2378{
2379 int all_zero = 1;
2380 int port = BP_PORT(bp);
2381 int vn;
2382
2383 bp->vn_weight_sum = 0;
2384 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2385 int func = 2*vn + port;
2386 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2387 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2388 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2389
2390 /* Skip hidden vns */
2391 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2392 continue;
2393
2394 /* If min rate is zero - set it to 1 */
2395 if (!vn_min_rate)
2396 vn_min_rate = DEF_MIN_RATE;
2397 else
2398 all_zero = 0;
2399
2400 bp->vn_weight_sum += vn_min_rate;
2401 }
2402
2403 /* ... only if all min rates are zeros - disable fairness */
2404 if (all_zero) {
2405 bp->cmng.flags.cmng_enables &=
2406 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2407		DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2408 " fairness will be disabled\n");
2409 } else
2410 bp->cmng.flags.cmng_enables |=
2411 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2412}
2413
2414static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2415{
2416 struct rate_shaping_vars_per_vn m_rs_vn;
2417 struct fairness_vars_per_vn m_fair_vn;
2418 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2419 u16 vn_min_rate, vn_max_rate;
2420 int i;
2421
2422 /* If function is hidden - set min and max to zeroes */
2423 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2424 vn_min_rate = 0;
2425 vn_max_rate = 0;
2426
2427 } else {
2428 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2429 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2430 /* If min rate is zero - set it to 1 */
2431 if (!vn_min_rate)
2432 vn_min_rate = DEF_MIN_RATE;
2433 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2434 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2435 }
2436 DP(NETIF_MSG_IFUP,
2437 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2438 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2439
2440 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2441 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2442
2443 /* global vn counter - maximal Mbps for this vn */
2444 m_rs_vn.vn_counter.rate = vn_max_rate;
2445
2446 /* quota - number of bytes transmitted in this period */
2447 m_rs_vn.vn_counter.quota =
2448 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2449
2450 if (bp->vn_weight_sum) {
2451 /* credit for each period of the fairness algorithm:
2452		   number of bytes in T_FAIR (the vns share the port rate).
2453 vn_weight_sum should not be larger than 10000, thus
2454 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2455 than zero */
2456 m_fair_vn.vn_credit_delta =
2457 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2458 (8 * bp->vn_weight_sum))),
2459 (bp->cmng.fair_vars.fair_threshold * 2));
2460 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2461 m_fair_vn.vn_credit_delta);
2462 }
2463
2464 /* Store it to internal memory */
2465 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2466 REG_WR(bp, BAR_XSTRORM_INTMEM +
2467 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2468 ((u32 *)(&m_rs_vn))[i]);
2469
2470 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2471 REG_WR(bp, BAR_XSTRORM_INTMEM +
2472 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2473 ((u32 *)(&m_fair_vn))[i]);
2474}
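/*
 * A worked example under the same assumptions: a vn capped at
 * vn_max_rate = 2500 Mbps gets quota = 2500 * 100 / 8 = 31250 bytes per
 * rate-shaping period, and its fairness credit delta is the larger of
 * vn_min_rate * (T_FAIR_COEF / (8 * vn_weight_sum)) and twice the
 * fairness threshold, presumably so the per-period credit never falls
 * below the granularity the fairness timer works with.
 */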
2475
2476
2477/* This function is called upon link interrupt */
2478static void bnx2x_link_attn(struct bnx2x *bp)
2479{
2480 u32 prev_link_status = bp->link_vars.link_status;
2481 /* Make sure that we are synced with the current statistics */
2482 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2483
2484 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2485
2486 if (bp->link_vars.link_up) {
2487
2488 /* dropless flow control */
2489 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2490 int port = BP_PORT(bp);
2491 u32 pause_enabled = 0;
2492
2493 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2494 pause_enabled = 1;
2495
2496 REG_WR(bp, BAR_USTRORM_INTMEM +
2497 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2498 pause_enabled);
2499 }
2500
2501 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2502 struct host_port_stats *pstats;
2503
2504 pstats = bnx2x_sp(bp, port_stats);
2505 /* reset old bmac stats */
2506 memset(&(pstats->mac_stx[0]), 0,
2507 sizeof(struct mac_stx));
2508 }
2509 if (bp->state == BNX2X_STATE_OPEN)
2510 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2511 }
2512
2513 /* indicate link status only if link status actually changed */
2514 if (prev_link_status != bp->link_vars.link_status)
2515 bnx2x_link_report(bp);
2516
2517 if (IS_E1HMF(bp)) {
2518 int port = BP_PORT(bp);
2519 int func;
2520 int vn;
2521
2522 /* Set the attention towards other drivers on the same port */
2523 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2524 if (vn == BP_E1HVN(bp))
2525 continue;
2526
2527 func = ((vn << 1) | port);
2528 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2529 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2530 }
2531
2532 if (bp->link_vars.link_up) {
2533 int i;
2534
2535 /* Init rate shaping and fairness contexts */
2536 bnx2x_init_port_minmax(bp);
2537
2538 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2539 bnx2x_init_vn_minmax(bp, 2*vn + port);
2540
2541 /* Store it to internal memory */
2542 for (i = 0;
2543 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2544 REG_WR(bp, BAR_XSTRORM_INTMEM +
2545 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2546 ((u32 *)(&bp->cmng))[i]);
2547 }
2548 }
2549}
2550
2551static void bnx2x__link_status_update(struct bnx2x *bp)
2552{
2553 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2554 return;
2555
2556 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2557
2558 if (bp->link_vars.link_up)
2559 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2560 else
2561 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2562
2563 bnx2x_calc_vn_weight_sum(bp);
2564
2565 /* indicate link status */
2566 bnx2x_link_report(bp);
2567}
2568
2569static void bnx2x_pmf_update(struct bnx2x *bp)
2570{
2571 int port = BP_PORT(bp);
2572 u32 val;
2573
2574 bp->port.pmf = 1;
2575 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2576
2577 /* enable nig attention */
2578 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2579 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2580 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2581
2582 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2583}
2584
2585/* end of Link */
2586
2587/* slow path */
2588
2589/*
2590 * General service functions
2591 */
2592
2593/* send the MCP a request, block until there is a reply */
2594u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2595{
2596 int func = BP_FUNC(bp);
2597 u32 seq = ++bp->fw_seq;
2598 u32 rc = 0;
2599 u32 cnt = 1;
2600 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2601
2602 mutex_lock(&bp->fw_mb_mutex);
2603 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2604 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2605
2606 do {
2607 		/* let the FW do its magic ... */
2608 msleep(delay);
2609
2610 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2611
2612 		/* Give the FW up to 5 seconds (500*10ms) */
2613 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2614
2615 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2616 cnt*delay, rc, seq);
2617
2618 /* is this a reply to our command? */
2619 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2620 rc &= FW_MSG_CODE_MASK;
2621 else {
2622 /* FW BUG! */
2623 BNX2X_ERR("FW failed to respond!\n");
2624 bnx2x_fw_dump(bp);
2625 rc = 0;
2626 }
2627 mutex_unlock(&bp->fw_mb_mutex);
2628
2629 return rc;
2630}
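/*
 * Note: a return value of 0 means the MCP did not answer within the
 * ~5 second polling window above; this function already logs the error
 * and dumps the FW state, so callers below (e.g. bnx2x_dcc_event())
 * simply issue the command without checking the result.
 */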
2631
2632static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2633static void bnx2x_set_rx_mode(struct net_device *dev);
2634
2635static void bnx2x_e1h_disable(struct bnx2x *bp)
2636{
2637 int port = BP_PORT(bp);
2638
2639 netif_tx_disable(bp->dev);
2640
2641 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2642
2643 netif_carrier_off(bp->dev);
2644}
2645
2646static void bnx2x_e1h_enable(struct bnx2x *bp)
2647{
2648 int port = BP_PORT(bp);
2649
2650 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2651
2652 	/* Only the Tx queues should be re-enabled */
2653 netif_tx_wake_all_queues(bp->dev);
2654
2655 	/*
2656 	 * Do not call netif_carrier_on() here; it will be called when the link
2657 	 * state is checked and the link is up
2658 	 */
2659}
2660
2661static void bnx2x_update_min_max(struct bnx2x *bp)
2662{
2663 int port = BP_PORT(bp);
2664 int vn, i;
2665
2666 /* Init rate shaping and fairness contexts */
2667 bnx2x_init_port_minmax(bp);
2668
2669 bnx2x_calc_vn_weight_sum(bp);
2670
2671 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2672 bnx2x_init_vn_minmax(bp, 2*vn + port);
2673
2674 if (bp->port.pmf) {
2675 int func;
2676
2677 /* Set the attention towards other drivers on the same port */
2678 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2679 if (vn == BP_E1HVN(bp))
2680 continue;
2681
2682 func = ((vn << 1) | port);
2683 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2684 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2685 }
2686
2687 /* Store it to internal memory */
2688 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2689 REG_WR(bp, BAR_XSTRORM_INTMEM +
2690 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2691 ((u32 *)(&bp->cmng))[i]);
2692 }
2693}
2694
2695static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2696{
2697 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2698
2699 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2700
2701 		/*
2702 		 * This is the only place besides function initialization
2703 		 * where bp->flags can change, so it is done without any
2704 		 * locks
2705 		 */
2706 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2707 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2708 bp->flags |= MF_FUNC_DIS;
2709
2710 bnx2x_e1h_disable(bp);
2711 } else {
2712 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2713 bp->flags &= ~MF_FUNC_DIS;
2714
2715 bnx2x_e1h_enable(bp);
2716 }
2717 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2718 }
2719 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2720
2721 bnx2x_update_min_max(bp);
2722 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2723 }
2724
2725 /* Report results to MCP */
2726 if (dcc_event)
2727 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2728 else
2729 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2730}
2731
2732/* must be called under the spq lock */
2733static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2734{
2735 struct eth_spe *next_spe = bp->spq_prod_bd;
2736
2737 if (bp->spq_prod_bd == bp->spq_last_bd) {
2738 bp->spq_prod_bd = bp->spq;
2739 bp->spq_prod_idx = 0;
2740 DP(NETIF_MSG_TIMER, "end of spq\n");
2741 } else {
2742 bp->spq_prod_bd++;
2743 bp->spq_prod_idx++;
2744 }
2745 return next_spe;
2746}
2747
2748/* must be called under the spq lock */
2749static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2750{
2751 int func = BP_FUNC(bp);
2752
2753 /* Make sure that BD data is updated before writing the producer */
2754 wmb();
2755
2756 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2757 bp->spq_prod_idx);
2758 mmiowb();
2759}
2760
2761/* the slow path queue is odd since completions arrive on the fastpath ring */
2762static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2763 u32 data_hi, u32 data_lo, int common)
2764{
2765 struct eth_spe *spe;
2766
2767#ifdef BNX2X_STOP_ON_ERROR
2768 if (unlikely(bp->panic))
2769 return -EIO;
2770#endif
2771
2772 spin_lock_bh(&bp->spq_lock);
2773
2774 if (!bp->spq_left) {
2775 BNX2X_ERR("BUG! SPQ ring full!\n");
2776 spin_unlock_bh(&bp->spq_lock);
2777 bnx2x_panic();
2778 return -EBUSY;
2779 }
2780
2781 spe = bnx2x_sp_get_next(bp);
2782
2783 	/* CID needs port number to be encoded in it */
2784 spe->hdr.conn_and_cmd_data =
2785 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2786 HW_CID(bp, cid));
2787 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2788 if (common)
2789 spe->hdr.type |=
2790 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2791
2792 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2793 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2794
2795 bp->spq_left--;
2796
2797 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2798 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2799 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2800 (u32)(U64_LO(bp->spq_mapping) +
2801 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2802 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2803
2804 bnx2x_sp_prod_update(bp);
2805 spin_unlock_bh(&bp->spq_lock);
2806 return 0;
2807}
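/*
 * bnx2x_sp_post() consumes one spq_left credit per posted element; the
 * "common" flag marks the element as a common (non-connection) ramrod by
 * setting the SPE_HDR_COMMON_RAMROD bit.  For a concrete caller see
 * bnx2x_storm_stats_post() below, which posts RAMROD_CMD_ID_ETH_STAT_QUERY
 * and then returns the credit because the stats ramrod has its own slot
 * on the SPQ.
 */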
2808
2809/* acquire split MCP access lock register */
2810static int bnx2x_acquire_alr(struct bnx2x *bp)
2811{
2812 u32 j, val;
2813 int rc = 0;
2814
2815 might_sleep();
2816 for (j = 0; j < 1000; j++) {
2817 val = (1UL << 31);
2818 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2819 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2820 if (val & (1L << 31))
2821 break;
2822
2823 msleep(5);
2824 }
2825 if (!(val & (1L << 31))) {
2826 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2827 rc = -EBUSY;
2828 }
2829
2830 return rc;
2831}
2832
2833/* release split MCP access lock register */
2834static void bnx2x_release_alr(struct bnx2x *bp)
2835{
2836 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2837}
2838
2839static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2840{
2841 struct host_def_status_block *def_sb = bp->def_status_blk;
2842 u16 rc = 0;
2843
2844 barrier(); /* status block is written to by the chip */
2845 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2846 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2847 rc |= 1;
2848 }
2849 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2850 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2851 rc |= 2;
2852 }
2853 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2854 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2855 rc |= 4;
2856 }
2857 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2858 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2859 rc |= 8;
2860 }
2861 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2862 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2863 rc |= 16;
2864 }
2865 return rc;
2866}
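/*
 * The bitmask returned by bnx2x_update_dsb_idx() encodes which default
 * status block indices have advanced: bit 0 - attention bits, bit 1 -
 * CSTORM, bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.  bnx2x_sp_task()
 * below only acts on bits 0 (attentions) and 1 (CSTORM STAT_QUERY events).
 */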
2867
2868/*
2869 * slow path service functions
2870 */
2871
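/*
 * Handle newly asserted attention bits: mask them in the AEU (under the
 * per-port HW lock), record them in bp->attn_state, service the
 * hard-wired sources (NIG link attention under the PHY lock, SW timer,
 * GPIOs and general attentions), then write the bits to the HC
 * ATTN_BITS_SET register and restore the NIG interrupt mask.
 */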
2872static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2873{
2874 int port = BP_PORT(bp);
2875 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2876 COMMAND_REG_ATTN_BITS_SET);
2877 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2878 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2879 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2880 NIG_REG_MASK_INTERRUPT_PORT0;
2881 u32 aeu_mask;
2882 u32 nig_mask = 0;
2883
2884 if (bp->attn_state & asserted)
2885 BNX2X_ERR("IGU ERROR\n");
2886
2887 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2888 aeu_mask = REG_RD(bp, aeu_addr);
2889
2890 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2891 aeu_mask, asserted);
2892 aeu_mask &= ~(asserted & 0x3ff);
2893 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2894
2895 REG_WR(bp, aeu_addr, aeu_mask);
2896 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2897
2898 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2899 bp->attn_state |= asserted;
2900 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2901
2902 if (asserted & ATTN_HARD_WIRED_MASK) {
2903 if (asserted & ATTN_NIG_FOR_FUNC) {
2904
2905 bnx2x_acquire_phy_lock(bp);
2906
2907 /* save nig interrupt mask */
2908 nig_mask = REG_RD(bp, nig_int_mask_addr);
2909 REG_WR(bp, nig_int_mask_addr, 0);
2910
2911 bnx2x_link_attn(bp);
2912
2913 /* handle unicore attn? */
2914 }
2915 if (asserted & ATTN_SW_TIMER_4_FUNC)
2916 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2917
2918 if (asserted & GPIO_2_FUNC)
2919 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2920
2921 if (asserted & GPIO_3_FUNC)
2922 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2923
2924 if (asserted & GPIO_4_FUNC)
2925 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2926
2927 if (port == 0) {
2928 if (asserted & ATTN_GENERAL_ATTN_1) {
2929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2931 }
2932 if (asserted & ATTN_GENERAL_ATTN_2) {
2933 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2934 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2935 }
2936 if (asserted & ATTN_GENERAL_ATTN_3) {
2937 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2938 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2939 }
2940 } else {
2941 if (asserted & ATTN_GENERAL_ATTN_4) {
2942 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2943 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2944 }
2945 if (asserted & ATTN_GENERAL_ATTN_5) {
2946 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2947 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2948 }
2949 if (asserted & ATTN_GENERAL_ATTN_6) {
2950 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2951 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2952 }
2953 }
2954
2955 } /* if hardwired */
2956
2957 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2958 asserted, hc_addr);
2959 REG_WR(bp, hc_addr, asserted);
2960
2961 /* now set back the mask */
2962 if (asserted & ATTN_NIG_FOR_FUNC) {
2963 REG_WR(bp, nig_int_mask_addr, nig_mask);
2964 bnx2x_release_phy_lock(bp);
2965 }
2966}
2967
2968static inline void bnx2x_fan_failure(struct bnx2x *bp)
2969{
2970 int port = BP_PORT(bp);
2971
2972 /* mark the failure */
2973 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2974 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2975 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2976 bp->link_params.ext_phy_config);
2977
2978 /* log the failure */
2979 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2980 		   " the driver to shut down the card to prevent permanent"
2981 " damage. Please contact OEM Support for assistance\n");
2982}
2983
2984static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2985{
2986 int port = BP_PORT(bp);
2987 int reg_offset;
2988 u32 val, swap_val, swap_override;
2989
2990 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2991 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2992
2993 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2994
2995 val = REG_RD(bp, reg_offset);
2996 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2997 REG_WR(bp, reg_offset, val);
2998
2999 BNX2X_ERR("SPIO5 hw attention\n");
3000
3001 /* Fan failure attention */
3002 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
3003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3004 /* Low power mode is controlled by GPIO 2 */
3005 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3006 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3007 /* The PHY reset is controlled by GPIO 1 */
3008 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3009 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3010 break;
3011
3012 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
3013 /* The PHY reset is controlled by GPIO 1 */
3014 /* fake the port number to cancel the swap done in
3015 set_gpio() */
3016 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
3017 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
3018 port = (swap_val && swap_override) ^ 1;
3019 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3020 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3021 break;
3022
3023 default:
3024 break;
3025 }
3026 bnx2x_fan_failure(bp);
3027 }
3028
3029 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3030 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3031 bnx2x_acquire_phy_lock(bp);
3032 bnx2x_handle_module_detect_int(&bp->link_params);
3033 bnx2x_release_phy_lock(bp);
3034 }
3035
3036 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3037
3038 val = REG_RD(bp, reg_offset);
3039 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3040 REG_WR(bp, reg_offset, val);
3041
3042 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3043 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3044 bnx2x_panic();
3045 }
3046}
3047
3048static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3049{
3050 u32 val;
3051
3052 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3053
3054 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3055 BNX2X_ERR("DB hw attention 0x%x\n", val);
3056 /* DORQ discard attention */
3057 if (val & 0x2)
3058 BNX2X_ERR("FATAL error from DORQ\n");
3059 }
3060
3061 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3062
3063 int port = BP_PORT(bp);
3064 int reg_offset;
3065
3066 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3067 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3068
3069 val = REG_RD(bp, reg_offset);
3070 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3071 REG_WR(bp, reg_offset, val);
3072
3073 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3074 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3075 bnx2x_panic();
3076 }
3077}
3078
3079static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3080{
3081 u32 val;
3082
3083 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3084
3085 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3086 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3087 /* CFC error attention */
3088 if (val & 0x2)
3089 BNX2X_ERR("FATAL error from CFC\n");
3090 }
3091
3092 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3093
3094 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3095 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3096 /* RQ_USDMDP_FIFO_OVERFLOW */
3097 if (val & 0x18000)
3098 BNX2X_ERR("FATAL error from PXP\n");
3099 }
3100
3101 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3102
3103 int port = BP_PORT(bp);
3104 int reg_offset;
3105
3106 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3107 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3108
3109 val = REG_RD(bp, reg_offset);
3110 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3111 REG_WR(bp, reg_offset, val);
3112
3113 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3114 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3115 bnx2x_panic();
3116 }
3117}
3118
3119static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3120{
3121 u32 val;
3122
3123 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3124
3125 if (attn & BNX2X_PMF_LINK_ASSERT) {
3126 int func = BP_FUNC(bp);
3127
3128 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3129 bp->mf_config = SHMEM_RD(bp,
3130 mf_cfg.func_mf_config[func].config);
3131 val = SHMEM_RD(bp, func_mb[func].drv_status);
3132 if (val & DRV_STATUS_DCC_EVENT_MASK)
3133 bnx2x_dcc_event(bp,
3134 (val & DRV_STATUS_DCC_EVENT_MASK));
3135 bnx2x__link_status_update(bp);
3136 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3137 bnx2x_pmf_update(bp);
3138
3139 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3140
3141 BNX2X_ERR("MC assert!\n");
3142 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3143 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3144 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3145 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3146 bnx2x_panic();
3147
3148 } else if (attn & BNX2X_MCP_ASSERT) {
3149
3150 BNX2X_ERR("MCP assert!\n");
3151 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3152 bnx2x_fw_dump(bp);
3153
3154 } else
3155 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3156 }
3157
3158 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3159 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3160 if (attn & BNX2X_GRC_TIMEOUT) {
3161 val = CHIP_IS_E1H(bp) ?
3162 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3163 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3164 }
3165 if (attn & BNX2X_GRC_RSV) {
3166 val = CHIP_IS_E1H(bp) ?
3167 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3168 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3169 }
3170 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3171 }
3172}
3173
3174static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3175static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3176
3177
3178#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3179#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3180#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3181#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3182#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3183#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
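/*
 * Recovery register layout (as used by the helpers below): the low
 * LOAD_COUNTER_BITS (16) bits of MISC_REG_GENERIC_POR_1 hold a load
 * counter, while bit RESET_DONE_FLAG_SHIFT (16) is set while a global
 * reset is in progress and cleared again by bnx2x_set_reset_done().
 */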
3184/*
3185 * should be run under rtnl lock
3186 */
3187static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3188{
3189 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3190 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3191 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3192 barrier();
3193 mmiowb();
3194}
3195
3196/*
3197 * should be run under rtnl lock
3198 */
3199static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3200{
3201 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3202 val |= (1 << 16);
3203 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3204 barrier();
3205 mmiowb();
3206}
3207
3208/*
3209 * should be run under rtnl lock
3210 */
3211static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3212{
3213 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3214 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3215 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3216}
3217
3218/*
3219 * should be run under rtnl lock
3220 */
3221static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3222{
3223 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3224
3225 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3226
3227 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3228 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3229 barrier();
3230 mmiowb();
3231}
3232
3233/*
3234 * should be run under rtnl lock
3235 */
3236static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3237{
3238 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3239
3240 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3241
3242 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3243 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3244 barrier();
3245 mmiowb();
3246
3247 return val1;
3248}
3249
3250/*
3251 * should be run under rtnl lock
3252 */
3253static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3254{
3255 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3256}
3257
3258static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3259{
3260 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3261 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3262}
3263
3264static inline void _print_next_block(int idx, const char *blk)
3265{
3266 if (idx)
3267 pr_cont(", ");
3268 pr_cont("%s", blk);
3269}
3270
3271static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3272{
3273 int i = 0;
3274 u32 cur_bit = 0;
3275 for (i = 0; sig; i++) {
3276 cur_bit = ((u32)0x1 << i);
3277 if (sig & cur_bit) {
3278 switch (cur_bit) {
3279 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3280 _print_next_block(par_num++, "BRB");
3281 break;
3282 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3283 _print_next_block(par_num++, "PARSER");
3284 break;
3285 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3286 _print_next_block(par_num++, "TSDM");
3287 break;
3288 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3289 _print_next_block(par_num++, "SEARCHER");
3290 break;
3291 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3292 _print_next_block(par_num++, "TSEMI");
3293 break;
3294 }
3295
3296 /* Clear the bit */
3297 sig &= ~cur_bit;
3298 }
3299 }
3300
3301 return par_num;
3302}
3303
3304static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3305{
3306 int i = 0;
3307 u32 cur_bit = 0;
3308 for (i = 0; sig; i++) {
3309 cur_bit = ((u32)0x1 << i);
3310 if (sig & cur_bit) {
3311 switch (cur_bit) {
3312 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3313 _print_next_block(par_num++, "PBCLIENT");
3314 break;
3315 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3316 _print_next_block(par_num++, "QM");
3317 break;
3318 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3319 _print_next_block(par_num++, "XSDM");
3320 break;
3321 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3322 _print_next_block(par_num++, "XSEMI");
3323 break;
3324 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3325 _print_next_block(par_num++, "DOORBELLQ");
3326 break;
3327 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3328 _print_next_block(par_num++, "VAUX PCI CORE");
3329 break;
3330 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3331 _print_next_block(par_num++, "DEBUG");
3332 break;
3333 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3334 _print_next_block(par_num++, "USDM");
3335 break;
3336 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3337 _print_next_block(par_num++, "USEMI");
3338 break;
3339 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3340 _print_next_block(par_num++, "UPB");
3341 break;
3342 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3343 _print_next_block(par_num++, "CSDM");
3344 break;
3345 }
3346
3347 /* Clear the bit */
3348 sig &= ~cur_bit;
3349 }
3350 }
3351
3352 return par_num;
3353}
3354
3355static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3356{
3357 int i = 0;
3358 u32 cur_bit = 0;
3359 for (i = 0; sig; i++) {
3360 cur_bit = ((u32)0x1 << i);
3361 if (sig & cur_bit) {
3362 switch (cur_bit) {
3363 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3364 _print_next_block(par_num++, "CSEMI");
3365 break;
3366 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3367 _print_next_block(par_num++, "PXP");
3368 break;
3369 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3370 _print_next_block(par_num++,
3371 "PXPPCICLOCKCLIENT");
3372 break;
3373 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3374 _print_next_block(par_num++, "CFC");
3375 break;
3376 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3377 _print_next_block(par_num++, "CDU");
3378 break;
3379 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3380 _print_next_block(par_num++, "IGU");
3381 break;
3382 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3383 _print_next_block(par_num++, "MISC");
3384 break;
3385 }
3386
3387 /* Clear the bit */
3388 sig &= ~cur_bit;
3389 }
3390 }
3391
3392 return par_num;
3393}
3394
3395static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3396{
3397 int i = 0;
3398 u32 cur_bit = 0;
3399 for (i = 0; sig; i++) {
3400 cur_bit = ((u32)0x1 << i);
3401 if (sig & cur_bit) {
3402 switch (cur_bit) {
3403 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3404 _print_next_block(par_num++, "MCP ROM");
3405 break;
3406 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3407 _print_next_block(par_num++, "MCP UMP RX");
3408 break;
3409 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3410 _print_next_block(par_num++, "MCP UMP TX");
3411 break;
3412 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3413 _print_next_block(par_num++, "MCP SCPAD");
3414 break;
3415 }
3416
3417 /* Clear the bit */
3418 sig &= ~cur_bit;
3419 }
3420 }
3421
3422 return par_num;
3423}
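/*
 * The four helpers above build a single KERN_ERR line, e.g. (illustrative):
 *
 *	eth0: Parity errors detected in blocks: BRB, PXP, MCP ROM
 *
 * bnx2x_parity_attn() below prints the prefix, threads the running
 * par_num through each helper and terminates the line.
 */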
3424
3425static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3426 u32 sig2, u32 sig3)
3427{
3428 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3429 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3430 int par_num = 0;
3431 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3432 "[0]:0x%08x [1]:0x%08x "
3433 "[2]:0x%08x [3]:0x%08x\n",
3434 sig0 & HW_PRTY_ASSERT_SET_0,
3435 sig1 & HW_PRTY_ASSERT_SET_1,
3436 sig2 & HW_PRTY_ASSERT_SET_2,
3437 sig3 & HW_PRTY_ASSERT_SET_3);
3438 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3439 bp->dev->name);
3440 par_num = bnx2x_print_blocks_with_parity0(
3441 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3442 par_num = bnx2x_print_blocks_with_parity1(
3443 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3444 par_num = bnx2x_print_blocks_with_parity2(
3445 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3446 par_num = bnx2x_print_blocks_with_parity3(
3447 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3448 printk("\n");
3449 return true;
3450 } else
3451 return false;
3452}
3453
3454static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3455{
3456 struct attn_route attn;
3457 int port = BP_PORT(bp);
3458
3459 attn.sig[0] = REG_RD(bp,
3460 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3461 port*4);
3462 attn.sig[1] = REG_RD(bp,
3463 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3464 port*4);
3465 attn.sig[2] = REG_RD(bp,
3466 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3467 port*4);
3468 attn.sig[3] = REG_RD(bp,
3469 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3470 port*4);
3471
3472 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3473 attn.sig[3]);
3474}
3475
3476static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3477{
3478 struct attn_route attn, *group_mask;
3479 int port = BP_PORT(bp);
3480 int index;
3481 u32 reg_addr;
3482 u32 val;
3483 u32 aeu_mask;
3484
3485 /* need to take HW lock because MCP or other port might also
3486 try to handle this event */
3487 bnx2x_acquire_alr(bp);
3488
3489 if (bnx2x_chk_parity_attn(bp)) {
3490 bp->recovery_state = BNX2X_RECOVERY_INIT;
3491 bnx2x_set_reset_in_progress(bp);
3492 schedule_delayed_work(&bp->reset_task, 0);
3493 /* Disable HW interrupts */
3494 bnx2x_int_disable(bp);
3495 bnx2x_release_alr(bp);
3496 		/* In case of parity errors don't handle attentions so that
3497 		 * the other function can also "see" the parity errors.
3498 		 */
3499 return;
3500 }
3501
3502 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3503 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3504 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3505 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3506 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3507 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3508
3509 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3510 if (deasserted & (1 << index)) {
3511 group_mask = &bp->attn_group[index];
3512
3513 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3514 index, group_mask->sig[0], group_mask->sig[1],
3515 group_mask->sig[2], group_mask->sig[3]);
3516
3517 bnx2x_attn_int_deasserted3(bp,
3518 attn.sig[3] & group_mask->sig[3]);
3519 bnx2x_attn_int_deasserted1(bp,
3520 attn.sig[1] & group_mask->sig[1]);
3521 bnx2x_attn_int_deasserted2(bp,
3522 attn.sig[2] & group_mask->sig[2]);
3523 bnx2x_attn_int_deasserted0(bp,
3524 attn.sig[0] & group_mask->sig[0]);
3525 }
3526 }
3527
3528 bnx2x_release_alr(bp);
3529
3530 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3531
3532 val = ~deasserted;
3533 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3534 val, reg_addr);
3535 REG_WR(bp, reg_addr, val);
3536
3537 if (~bp->attn_state & deasserted)
3538 BNX2X_ERR("IGU ERROR\n");
3539
3540 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3541 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3542
3543 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3544 aeu_mask = REG_RD(bp, reg_addr);
3545
3546 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3547 aeu_mask, deasserted);
3548 aeu_mask |= (deasserted & 0x3ff);
3549 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3550
3551 REG_WR(bp, reg_addr, aeu_mask);
3552 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3553
3554 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3555 bp->attn_state &= ~deasserted;
3556 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3557}
3558
3559static void bnx2x_attn_int(struct bnx2x *bp)
3560{
3561 /* read local copy of bits */
3562 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3563 attn_bits);
3564 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3565 attn_bits_ack);
3566 u32 attn_state = bp->attn_state;
3567
3568 /* look for changed bits */
3569 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3570 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3571
3572 DP(NETIF_MSG_HW,
3573 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3574 attn_bits, attn_ack, asserted, deasserted);
3575
3576 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3577 BNX2X_ERR("BAD attention state\n");
3578
3579 /* handle bits that were raised */
3580 if (asserted)
3581 bnx2x_attn_int_asserted(bp, asserted);
3582
3583 if (deasserted)
3584 bnx2x_attn_int_deasserted(bp, deasserted);
3585}
3586
3587static void bnx2x_sp_task(struct work_struct *work)
3588{
3589 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3590 u16 status;
3591
3592 /* Return here if interrupt is disabled */
3593 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3594 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3595 return;
3596 }
3597
3598 status = bnx2x_update_dsb_idx(bp);
3599/* if (status == 0) */
3600/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3601
3602 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3603
3604 /* HW attentions */
3605 if (status & 0x1) {
3606 bnx2x_attn_int(bp);
3607 status &= ~0x1;
3608 }
3609
3610 /* CStorm events: STAT_QUERY */
3611 if (status & 0x2) {
3612 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3613 status &= ~0x2;
3614 }
3615
3616 if (unlikely(status))
3617 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3618 status);
3619
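	/*
	 * Ack all five default status block indices; only the final ack
	 * (TSTORM) uses IGU_INT_ENABLE and thus re-enables the interrupt.
	 */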
3620 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3621 IGU_INT_NOP, 1);
3622 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3623 IGU_INT_NOP, 1);
3624 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3625 IGU_INT_NOP, 1);
3626 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3627 IGU_INT_NOP, 1);
3628 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3629 IGU_INT_ENABLE, 1);
3630}
3631
3632static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3633{
3634 struct net_device *dev = dev_instance;
3635 struct bnx2x *bp = netdev_priv(dev);
3636
3637 /* Return here if interrupt is disabled */
3638 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3639 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3640 return IRQ_HANDLED;
3641 }
3642
3643 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3644
3645#ifdef BNX2X_STOP_ON_ERROR
3646 if (unlikely(bp->panic))
3647 return IRQ_HANDLED;
3648#endif
3649
3650#ifdef BCM_CNIC
3651 {
3652 struct cnic_ops *c_ops;
3653
3654 rcu_read_lock();
3655 c_ops = rcu_dereference(bp->cnic_ops);
3656 if (c_ops)
3657 c_ops->cnic_handler(bp->cnic_data, NULL);
3658 rcu_read_unlock();
3659 }
3660#endif
3661 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3662
3663 return IRQ_HANDLED;
3664}
3665
3666/* end of slow path */
3667
3668/* Statistics */
3669
3670/****************************************************************************
3671* Macros
3672****************************************************************************/
3673
3674/* sum[hi:lo] += add[hi:lo] */
3675#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3676 do { \
3677 s_lo += a_lo; \
3678 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3679 } while (0)
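/*
 * Worked example: with sum = 0x00000001:0xffffffff and add = 0x0:0x2,
 * s_lo wraps to 0x00000001, the (s_lo < a_lo) test detects the carry,
 * and s_hi becomes 0x00000002 -- i.e. 0x1ffffffff + 2 = 0x200000001.
 */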
3680
3681/* difference = minuend - subtrahend */
3682#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3683 do { \
3684 if (m_lo < s_lo) { \
3685 /* underflow */ \
3686 d_hi = m_hi - s_hi; \
3687 if (d_hi > 0) { \
3688 /* we can 'loan' 1 */ \
3689 d_hi--; \
3690 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3691 } else { \
3692 /* m_hi <= s_hi */ \
3693 d_hi = 0; \
3694 d_lo = 0; \
3695 } \
3696 } else { \
3697 /* m_lo >= s_lo */ \
3698 if (m_hi < s_hi) { \
3699 d_hi = 0; \
3700 d_lo = 0; \
3701 } else { \
3702 /* m_hi >= s_hi */ \
3703 d_hi = m_hi - s_hi; \
3704 d_lo = m_lo - s_lo; \
3705 } \
3706 } \
3707 } while (0)
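/*
 * Worked example: minuend = 0x00000002:0x00000001 and subtrahend =
 * 0x0:0x00000002 take the underflow path, borrow 1 from the high word
 * and yield 0x00000001:0xffffffff (0x200000001 - 2 = 0x1ffffffff).
 * A true underflow (minuend < subtrahend) is clamped to 0:0 rather
 * than being allowed to wrap.
 */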
3708
3709#define UPDATE_STAT64(s, t) \
3710 do { \
3711 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3712 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3713 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3714 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3715 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3716 pstats->mac_stx[1].t##_lo, diff.lo); \
3717 } while (0)
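/*
 * In UPDATE_STAT64() mac_stx[0] holds the most recent raw MAC snapshot
 * (it is zeroed in bnx2x_link_attn() when a BMAC link comes up), while
 * mac_stx[1] accumulates the running totals by adding the per-interval
 * difference computed with DIFF_64().
 */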
3718
3719#define UPDATE_STAT64_NIG(s, t) \
3720 do { \
3721 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3722 diff.lo, new->s##_lo, old->s##_lo); \
3723 ADD_64(estats->t##_hi, diff.hi, \
3724 estats->t##_lo, diff.lo); \
3725 } while (0)
3726
3727/* sum[hi:lo] += add */
3728#define ADD_EXTEND_64(s_hi, s_lo, a) \
3729 do { \
3730 s_lo += a; \
3731 s_hi += (s_lo < a) ? 1 : 0; \
3732 } while (0)
3733
3734#define UPDATE_EXTEND_STAT(s) \
3735 do { \
3736 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3737 pstats->mac_stx[1].s##_lo, \
3738 new->s); \
3739 } while (0)
3740
3741#define UPDATE_EXTEND_TSTAT(s, t) \
3742 do { \
3743 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3744 old_tclient->s = tclient->s; \
3745 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3746 } while (0)
3747
3748#define UPDATE_EXTEND_USTAT(s, t) \
3749 do { \
3750 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3751 old_uclient->s = uclient->s; \
3752 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3753 } while (0)
3754
3755#define UPDATE_EXTEND_XSTAT(s, t) \
3756 do { \
3757 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3758 old_xclient->s = xclient->s; \
3759 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3760 } while (0)
3761
3762/* minuend -= subtrahend */
3763#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3764 do { \
3765 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3766 } while (0)
3767
3768/* minuend[hi:lo] -= subtrahend */
3769#define SUB_EXTEND_64(m_hi, m_lo, s) \
3770 do { \
3771 SUB_64(m_hi, 0, m_lo, s); \
3772 } while (0)
3773
3774#define SUB_EXTEND_USTAT(s, t) \
3775 do { \
3776 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3777 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3778 } while (0)
3779
3780/*
3781 * General service functions
3782 */
3783
3784static inline long bnx2x_hilo(u32 *hiref)
3785{
3786 u32 lo = *(hiref + 1);
3787#if (BITS_PER_LONG == 64)
3788 u32 hi = *hiref;
3789
3790 return HILO_U64(hi, lo);
3791#else
3792 return lo;
3793#endif
3794}
3795
3796/*
3797 * Init service functions
3798 */
3799
3800static void bnx2x_storm_stats_post(struct bnx2x *bp)
3801{
3802 if (!bp->stats_pending) {
3803 struct eth_query_ramrod_data ramrod_data = {0};
3804 int i, rc;
3805
3806 ramrod_data.drv_counter = bp->stats_counter++;
3807 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3808 for_each_queue(bp, i)
3809 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3810
3811 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3812 ((u32 *)&ramrod_data)[1],
3813 ((u32 *)&ramrod_data)[0], 0);
3814 if (rc == 0) {
3815 			/* stats ramrod has its own slot on the spq */
3816 bp->spq_left++;
3817 bp->stats_pending = 1;
3818 }
3819 }
3820}
3821
3822static void bnx2x_hw_stats_post(struct bnx2x *bp)
3823{
3824 struct dmae_command *dmae = &bp->stats_dmae;
3825 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3826
3827 *stats_comp = DMAE_COMP_VAL;
3828 if (CHIP_REV_IS_SLOW(bp))
3829 return;
3830
3831 /* loader */
3832 if (bp->executer_idx) {
3833 int loader_idx = PMF_DMAE_C(bp);
3834
3835 memset(dmae, 0, sizeof(struct dmae_command));
3836
3837 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3838 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3839 DMAE_CMD_DST_RESET |
3840#ifdef __BIG_ENDIAN
3841 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3842#else
3843 DMAE_CMD_ENDIANITY_DW_SWAP |
3844#endif
3845 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3846 DMAE_CMD_PORT_0) |
3847 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3848 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3849 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3850 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3851 sizeof(struct dmae_command) *
3852 (loader_idx + 1)) >> 2;
3853 dmae->dst_addr_hi = 0;
3854 dmae->len = sizeof(struct dmae_command) >> 2;
3855 if (CHIP_IS_E1(bp))
3856 dmae->len--;
3857 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3858 dmae->comp_addr_hi = 0;
3859 dmae->comp_val = 1;
3860
3861 *stats_comp = 0;
3862 bnx2x_post_dmae(bp, dmae, loader_idx);
3863
3864 } else if (bp->func_stx) {
3865 *stats_comp = 0;
3866 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3867 }
3868}
3869
3870static int bnx2x_stats_comp(struct bnx2x *bp)
3871{
3872 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3873 int cnt = 10;
3874
3875 might_sleep();
3876 while (*stats_comp != DMAE_COMP_VAL) {
3877 if (!cnt) {
3878 BNX2X_ERR("timeout waiting for stats finished\n");
3879 break;
3880 }
3881 cnt--;
3882 msleep(1);
3883 }
3884 return 1;
3885}
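/*
 * bnx2x_stats_comp() is used purely as a completion barrier: it polls
 * the stats_comp word for up to ~10 ms and always returns 1, so callers
 * (e.g. bnx2x_stats_pmf_update() below) ignore the return value.
 */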
3886
3887/*
3888 * Statistics service functions
3889 */
3890
3891static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3892{
3893 struct dmae_command *dmae;
3894 u32 opcode;
3895 int loader_idx = PMF_DMAE_C(bp);
3896 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3897
3898 /* sanity */
3899 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3900 BNX2X_ERR("BUG!\n");
3901 return;
3902 }
3903
3904 bp->executer_idx = 0;
3905
3906 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3907 DMAE_CMD_C_ENABLE |
3908 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3909#ifdef __BIG_ENDIAN
3910 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3911#else
3912 DMAE_CMD_ENDIANITY_DW_SWAP |
3913#endif
3914 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3915 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3916
3917 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3918 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3919 dmae->src_addr_lo = bp->port.port_stx >> 2;
3920 dmae->src_addr_hi = 0;
3921 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3922 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3923 dmae->len = DMAE_LEN32_RD_MAX;
3924 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3925 dmae->comp_addr_hi = 0;
3926 dmae->comp_val = 1;
3927
3928 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3929 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3930 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3931 dmae->src_addr_hi = 0;
3932 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3933 DMAE_LEN32_RD_MAX * 4);
3934 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3935 DMAE_LEN32_RD_MAX * 4);
3936 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3937 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3938 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3939 dmae->comp_val = DMAE_COMP_VAL;
3940
3941 *stats_comp = 0;
3942 bnx2x_hw_stats_post(bp);
3943 bnx2x_stats_comp(bp);
3944}
3945
3946static void bnx2x_port_stats_init(struct bnx2x *bp)
3947{
3948 struct dmae_command *dmae;
3949 int port = BP_PORT(bp);
3950 int vn = BP_E1HVN(bp);
3951 u32 opcode;
3952 int loader_idx = PMF_DMAE_C(bp);
3953 u32 mac_addr;
3954 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3955
3956 /* sanity */
3957 if (!bp->link_vars.link_up || !bp->port.pmf) {
3958 BNX2X_ERR("BUG!\n");
3959 return;
3960 }
3961
3962 bp->executer_idx = 0;
3963
3964 /* MCP */
3965 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3966 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3967 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3968#ifdef __BIG_ENDIAN
3969 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3970#else
3971 DMAE_CMD_ENDIANITY_DW_SWAP |
3972#endif
3973 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3974 (vn << DMAE_CMD_E1HVN_SHIFT));
3975
3976 if (bp->port.port_stx) {
3977
3978 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3979 dmae->opcode = opcode;
3980 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3981 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3982 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3983 dmae->dst_addr_hi = 0;
3984 dmae->len = sizeof(struct host_port_stats) >> 2;
3985 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3986 dmae->comp_addr_hi = 0;
3987 dmae->comp_val = 1;
3988 }
3989
3990 if (bp->func_stx) {
3991
3992 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3993 dmae->opcode = opcode;
3994 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3995 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3996 dmae->dst_addr_lo = bp->func_stx >> 2;
3997 dmae->dst_addr_hi = 0;
3998 dmae->len = sizeof(struct host_func_stats) >> 2;
3999 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4000 dmae->comp_addr_hi = 0;
4001 dmae->comp_val = 1;
4002 }
4003
4004 /* MAC */
4005 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4006 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4007 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4008#ifdef __BIG_ENDIAN
4009 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4010#else
4011 DMAE_CMD_ENDIANITY_DW_SWAP |
4012#endif
4013 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4014 (vn << DMAE_CMD_E1HVN_SHIFT));
4015
4016 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
4017
4018 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4019 NIG_REG_INGRESS_BMAC0_MEM);
4020
4021 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4022 BIGMAC_REGISTER_TX_STAT_GTBYT */
4023 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4024 dmae->opcode = opcode;
4025 dmae->src_addr_lo = (mac_addr +
4026 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4027 dmae->src_addr_hi = 0;
4028 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4029 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4030 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4031 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4032 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4033 dmae->comp_addr_hi = 0;
4034 dmae->comp_val = 1;
4035
4036 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4037 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4038 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4039 dmae->opcode = opcode;
4040 dmae->src_addr_lo = (mac_addr +
4041 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4042 dmae->src_addr_hi = 0;
4043 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4044 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4045 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4046 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4047 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4048 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4049 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4050 dmae->comp_addr_hi = 0;
4051 dmae->comp_val = 1;
4052
4053 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4054
4055 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4056
4057 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4058 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4059 dmae->opcode = opcode;
4060 dmae->src_addr_lo = (mac_addr +
4061 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4062 dmae->src_addr_hi = 0;
4063 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4064 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4065 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4066 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4067 dmae->comp_addr_hi = 0;
4068 dmae->comp_val = 1;
4069
4070 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4071 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4072 dmae->opcode = opcode;
4073 dmae->src_addr_lo = (mac_addr +
4074 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4075 dmae->src_addr_hi = 0;
4076 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4077 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4078 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4079 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4080 dmae->len = 1;
4081 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4082 dmae->comp_addr_hi = 0;
4083 dmae->comp_val = 1;
4084
4085 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4086 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4087 dmae->opcode = opcode;
4088 dmae->src_addr_lo = (mac_addr +
4089 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4090 dmae->src_addr_hi = 0;
4091 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4092 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4093 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4094 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4095 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4096 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4097 dmae->comp_addr_hi = 0;
4098 dmae->comp_val = 1;
4099 }
4100
4101 /* NIG */
4102 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4103 dmae->opcode = opcode;
4104 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4105 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4106 dmae->src_addr_hi = 0;
4107 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4108 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4109 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4110 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4111 dmae->comp_addr_hi = 0;
4112 dmae->comp_val = 1;
4113
4114 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4115 dmae->opcode = opcode;
4116 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4117 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4118 dmae->src_addr_hi = 0;
4119 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4120 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4121 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4122 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4123 dmae->len = (2*sizeof(u32)) >> 2;
4124 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4125 dmae->comp_addr_hi = 0;
4126 dmae->comp_val = 1;
4127
4128 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4129 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4130 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4131 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4132#ifdef __BIG_ENDIAN
4133 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4134#else
4135 DMAE_CMD_ENDIANITY_DW_SWAP |
4136#endif
4137 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4138 (vn << DMAE_CMD_E1HVN_SHIFT));
4139 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4140 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4141 dmae->src_addr_hi = 0;
4142 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4143 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4144 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4145 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4146 dmae->len = (2*sizeof(u32)) >> 2;
4147 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4148 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4149 dmae->comp_val = DMAE_COMP_VAL;
4150
4151 *stats_comp = 0;
4152}
4153
4154static void bnx2x_func_stats_init(struct bnx2x *bp)
4155{
4156 struct dmae_command *dmae = &bp->stats_dmae;
4157 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4158
4159 /* sanity */
4160 if (!bp->func_stx) {
4161 BNX2X_ERR("BUG!\n");
4162 return;
4163 }
4164
4165 bp->executer_idx = 0;
4166 memset(dmae, 0, sizeof(struct dmae_command));
4167
4168 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4169 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4170 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4171#ifdef __BIG_ENDIAN
4172 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4173#else
4174 DMAE_CMD_ENDIANITY_DW_SWAP |
4175#endif
4176 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4177 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4178 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4179 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4180 dmae->dst_addr_lo = bp->func_stx >> 2;
4181 dmae->dst_addr_hi = 0;
4182 dmae->len = sizeof(struct host_func_stats) >> 2;
4183 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4184 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4185 dmae->comp_val = DMAE_COMP_VAL;
4186
4187 *stats_comp = 0;
4188}
4189
4190static void bnx2x_stats_start(struct bnx2x *bp)
4191{
4192 if (bp->port.pmf)
4193 bnx2x_port_stats_init(bp);
4194
4195 else if (bp->func_stx)
4196 bnx2x_func_stats_init(bp);
4197
4198 bnx2x_hw_stats_post(bp);
4199 bnx2x_storm_stats_post(bp);
4200}
4201
4202static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4203{
4204 bnx2x_stats_comp(bp);
4205 bnx2x_stats_pmf_update(bp);
4206 bnx2x_stats_start(bp);
4207}
4208
4209static void bnx2x_stats_restart(struct bnx2x *bp)
4210{
4211 bnx2x_stats_comp(bp);
4212 bnx2x_stats_start(bp);
4213}
4214
4215static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4216{
4217 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4218 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4219 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4220 struct {
4221 u32 lo;
4222 u32 hi;
4223 } diff;
4224
4225 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4226 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4227 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4228 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4229 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4230 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4231 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4232 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4233 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4234 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4235 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4236 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4237 UPDATE_STAT64(tx_stat_gt127,
4238 tx_stat_etherstatspkts65octetsto127octets);
4239 UPDATE_STAT64(tx_stat_gt255,
4240 tx_stat_etherstatspkts128octetsto255octets);
4241 UPDATE_STAT64(tx_stat_gt511,
4242 tx_stat_etherstatspkts256octetsto511octets);
4243 UPDATE_STAT64(tx_stat_gt1023,
4244 tx_stat_etherstatspkts512octetsto1023octets);
4245 UPDATE_STAT64(tx_stat_gt1518,
4246 tx_stat_etherstatspkts1024octetsto1522octets);
4247 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4248 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4249 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4250 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4251 UPDATE_STAT64(tx_stat_gterr,
4252 tx_stat_dot3statsinternalmactransmiterrors);
4253 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4254
4255 estats->pause_frames_received_hi =
4256 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4257 estats->pause_frames_received_lo =
4258 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4259
4260 estats->pause_frames_sent_hi =
4261 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4262 estats->pause_frames_sent_lo =
4263 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4264}
4265
4266static void bnx2x_emac_stats_update(struct bnx2x *bp)
4267{
4268 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4269 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4270 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4271
4272 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4273 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4274 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4275 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4276 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4277 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4278 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4279 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4280 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4281 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4282 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4283 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4284 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4285 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4286 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4287 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4288 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4289 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4290 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4291 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4292 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4293 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4294 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4295 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4296 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4297 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4298 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4299 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4300 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4301 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4302 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4303
4304 estats->pause_frames_received_hi =
4305 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4306 estats->pause_frames_received_lo =
4307 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4308 ADD_64(estats->pause_frames_received_hi,
4309 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4310 estats->pause_frames_received_lo,
4311 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4312
4313 estats->pause_frames_sent_hi =
4314 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4315 estats->pause_frames_sent_lo =
4316 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4317 ADD_64(estats->pause_frames_sent_hi,
4318 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4319 estats->pause_frames_sent_lo,
4320 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4321}
4322
4323static int bnx2x_hw_stats_update(struct bnx2x *bp)
4324{
4325 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4326 struct nig_stats *old = &(bp->port.old_nig_stats);
4327 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4328 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4329 struct {
4330 u32 lo;
4331 u32 hi;
4332 } diff;
4333
4334 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4335 bnx2x_bmac_stats_update(bp);
4336
4337 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4338 bnx2x_emac_stats_update(bp);
4339
4340 else { /* unreached */
4341 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4342 return -1;
4343 }
4344
4345 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4346 new->brb_discard - old->brb_discard);
4347 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4348 new->brb_truncate - old->brb_truncate);
4349
4350 UPDATE_STAT64_NIG(egress_mac_pkt0,
4351 etherstatspkts1024octetsto1522octets);
4352 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4353
4354 memcpy(old, new, sizeof(struct nig_stats));
4355
4356 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4357 sizeof(struct mac_stx));
4358 estats->brb_drop_hi = pstats->brb_drop_hi;
4359 estats->brb_drop_lo = pstats->brb_drop_lo;
4360
4361 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
4362
4363 if (!BP_NOMCP(bp)) {
4364 u32 nig_timer_max =
4365 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4366 if (nig_timer_max != estats->nig_timer_max) {
4367 estats->nig_timer_max = nig_timer_max;
4368 BNX2X_ERR("NIG timer max (%u)\n",
4369 estats->nig_timer_max);
4370 }
4371 }
4372
4373 return 0;
4374}
4375
4376static int bnx2x_storm_stats_update(struct bnx2x *bp)
4377{
4378 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4379 struct tstorm_per_port_stats *tport =
4380 &stats->tstorm_common.port_statistics;
4381 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4382 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4383 int i;
4384
4385 memcpy(&(fstats->total_bytes_received_hi),
4386 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4387 sizeof(struct host_func_stats) - 2*sizeof(u32));
4388 estats->error_bytes_received_hi = 0;
4389 estats->error_bytes_received_lo = 0;
4390 estats->etherstatsoverrsizepkts_hi = 0;
4391 estats->etherstatsoverrsizepkts_lo = 0;
4392 estats->no_buff_discard_hi = 0;
4393 estats->no_buff_discard_lo = 0;
4394
4395 for_each_queue(bp, i) {
4396 struct bnx2x_fastpath *fp = &bp->fp[i];
4397 int cl_id = fp->cl_id;
4398 struct tstorm_per_client_stats *tclient =
4399 &stats->tstorm_common.client_statistics[cl_id];
4400 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4401 struct ustorm_per_client_stats *uclient =
4402 &stats->ustorm_common.client_statistics[cl_id];
4403 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4404 struct xstorm_per_client_stats *xclient =
4405 &stats->xstorm_common.client_statistics[cl_id];
4406 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4407 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4408 u32 diff;
4409
4410 /* are storm stats valid? */
4411 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4412 bp->stats_counter) {
4413 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4414 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
4415 i, xclient->stats_counter, bp->stats_counter);
4416 return -1;
4417 }
4418 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4419 bp->stats_counter) {
4420 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4421 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
4422 i, tclient->stats_counter, bp->stats_counter);
4423 return -2;
4424 }
4425 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4426 bp->stats_counter) {
4427 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4428 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
4429 i, uclient->stats_counter, bp->stats_counter);
4430 return -4;
4431 }
4432
4433 qstats->total_bytes_received_hi =
4434 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4435 qstats->total_bytes_received_lo =
4436 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4437
4438 ADD_64(qstats->total_bytes_received_hi,
4439 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4440 qstats->total_bytes_received_lo,
4441 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4442
4443 ADD_64(qstats->total_bytes_received_hi,
4444 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4445 qstats->total_bytes_received_lo,
4446 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4447
4448 SUB_64(qstats->total_bytes_received_hi,
4449 le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4450 qstats->total_bytes_received_lo,
4451 le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4452
4453 SUB_64(qstats->total_bytes_received_hi,
4454 le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4455 qstats->total_bytes_received_lo,
4456 le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4457
4458 SUB_64(qstats->total_bytes_received_hi,
4459 le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4460 qstats->total_bytes_received_lo,
4461 le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4462
4463 qstats->valid_bytes_received_hi =
4464 qstats->total_bytes_received_hi;
4465 qstats->valid_bytes_received_lo =
4466 qstats->total_bytes_received_lo;
4467
4468 qstats->error_bytes_received_hi =
4469 le32_to_cpu(tclient->rcv_error_bytes.hi);
4470 qstats->error_bytes_received_lo =
4471 le32_to_cpu(tclient->rcv_error_bytes.lo);
4472
4473 ADD_64(qstats->total_bytes_received_hi,
4474 qstats->error_bytes_received_hi,
4475 qstats->total_bytes_received_lo,
4476 qstats->error_bytes_received_lo);
4477
4478 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4479 total_unicast_packets_received);
4480 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4481 total_multicast_packets_received);
4482 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4483 total_broadcast_packets_received);
4484 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4485 etherstatsoverrsizepkts);
4486 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4487
4488 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4489 total_unicast_packets_received);
4490 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4491 total_multicast_packets_received);
4492 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4493 total_broadcast_packets_received);
4494 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4495 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4496 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4497
4498 qstats->total_bytes_transmitted_hi =
4499 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4500 qstats->total_bytes_transmitted_lo =
4501 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4502
4503 ADD_64(qstats->total_bytes_transmitted_hi,
4504 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4505 qstats->total_bytes_transmitted_lo,
4506 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4507
4508 ADD_64(qstats->total_bytes_transmitted_hi,
4509 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4510 qstats->total_bytes_transmitted_lo,
4511 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4512
4513 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4514 total_unicast_packets_transmitted);
4515 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4516 total_multicast_packets_transmitted);
4517 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4518 total_broadcast_packets_transmitted);
4519
4520 old_tclient->checksum_discard = tclient->checksum_discard;
4521 old_tclient->ttl0_discard = tclient->ttl0_discard;
4522
4523 ADD_64(fstats->total_bytes_received_hi,
4524 qstats->total_bytes_received_hi,
4525 fstats->total_bytes_received_lo,
4526 qstats->total_bytes_received_lo);
4527 ADD_64(fstats->total_bytes_transmitted_hi,
4528 qstats->total_bytes_transmitted_hi,
4529 fstats->total_bytes_transmitted_lo,
4530 qstats->total_bytes_transmitted_lo);
4531 ADD_64(fstats->total_unicast_packets_received_hi,
4532 qstats->total_unicast_packets_received_hi,
4533 fstats->total_unicast_packets_received_lo,
4534 qstats->total_unicast_packets_received_lo);
4535 ADD_64(fstats->total_multicast_packets_received_hi,
4536 qstats->total_multicast_packets_received_hi,
4537 fstats->total_multicast_packets_received_lo,
4538 qstats->total_multicast_packets_received_lo);
4539 ADD_64(fstats->total_broadcast_packets_received_hi,
4540 qstats->total_broadcast_packets_received_hi,
4541 fstats->total_broadcast_packets_received_lo,
4542 qstats->total_broadcast_packets_received_lo);
4543 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4544 qstats->total_unicast_packets_transmitted_hi,
4545 fstats->total_unicast_packets_transmitted_lo,
4546 qstats->total_unicast_packets_transmitted_lo);
4547 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4548 qstats->total_multicast_packets_transmitted_hi,
4549 fstats->total_multicast_packets_transmitted_lo,
4550 qstats->total_multicast_packets_transmitted_lo);
4551 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4552 qstats->total_broadcast_packets_transmitted_hi,
4553 fstats->total_broadcast_packets_transmitted_lo,
4554 qstats->total_broadcast_packets_transmitted_lo);
4555 ADD_64(fstats->valid_bytes_received_hi,
4556 qstats->valid_bytes_received_hi,
4557 fstats->valid_bytes_received_lo,
4558 qstats->valid_bytes_received_lo);
4559
4560 ADD_64(estats->error_bytes_received_hi,
4561 qstats->error_bytes_received_hi,
4562 estats->error_bytes_received_lo,
4563 qstats->error_bytes_received_lo);
4564 ADD_64(estats->etherstatsoverrsizepkts_hi,
4565 qstats->etherstatsoverrsizepkts_hi,
4566 estats->etherstatsoverrsizepkts_lo,
4567 qstats->etherstatsoverrsizepkts_lo);
4568 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4569 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4570 }
4571
4572 ADD_64(fstats->total_bytes_received_hi,
4573 estats->rx_stat_ifhcinbadoctets_hi,
4574 fstats->total_bytes_received_lo,
4575 estats->rx_stat_ifhcinbadoctets_lo);
4576
4577 memcpy(estats, &(fstats->total_bytes_received_hi),
4578 sizeof(struct host_func_stats) - 2*sizeof(u32));
4579
4580 ADD_64(estats->etherstatsoverrsizepkts_hi,
4581 estats->rx_stat_dot3statsframestoolong_hi,
4582 estats->etherstatsoverrsizepkts_lo,
4583 estats->rx_stat_dot3statsframestoolong_lo);
4584 ADD_64(estats->error_bytes_received_hi,
4585 estats->rx_stat_ifhcinbadoctets_hi,
4586 estats->error_bytes_received_lo,
4587 estats->rx_stat_ifhcinbadoctets_lo);
4588
4589 if (bp->port.pmf) {
4590 estats->mac_filter_discard =
4591 le32_to_cpu(tport->mac_filter_discard);
4592 estats->xxoverflow_discard =
4593 le32_to_cpu(tport->xxoverflow_discard);
4594 estats->brb_truncate_discard =
4595 le32_to_cpu(tport->brb_truncate_discard);
4596 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4597 }
4598
4599 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4600
4601 bp->stats_pending = 0;
4602
4603 return 0;
4604}
4605
4606static void bnx2x_net_stats_update(struct bnx2x *bp)
4607{
4608 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4609 struct net_device_stats *nstats = &bp->dev->stats;
4610 int i;
4611
4612 nstats->rx_packets =
4613 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4614 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4615 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4616
4617 nstats->tx_packets =
4618 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4619 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4620 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4621
4622 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4623
4624 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4625
4626 nstats->rx_dropped = estats->mac_discard;
4627 for_each_queue(bp, i)
4628 nstats->rx_dropped +=
4629 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4630
4631 nstats->tx_dropped = 0;
4632
4633 nstats->multicast =
4634 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4635
4636 nstats->collisions =
4637 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4638
4639 nstats->rx_length_errors =
4640 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4641 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4642 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4643 bnx2x_hilo(&estats->brb_truncate_hi);
4644 nstats->rx_crc_errors =
4645 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4646 nstats->rx_frame_errors =
4647 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4648 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4649 nstats->rx_missed_errors = estats->xxoverflow_discard;
4650
4651 nstats->rx_errors = nstats->rx_length_errors +
4652 nstats->rx_over_errors +
4653 nstats->rx_crc_errors +
4654 nstats->rx_frame_errors +
4655 nstats->rx_fifo_errors +
4656 nstats->rx_missed_errors;
4657
4658 nstats->tx_aborted_errors =
4659 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4660 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4661 nstats->tx_carrier_errors =
4662 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4663 nstats->tx_fifo_errors = 0;
4664 nstats->tx_heartbeat_errors = 0;
4665 nstats->tx_window_errors = 0;
4666
4667 nstats->tx_errors = nstats->tx_aborted_errors +
4668 nstats->tx_carrier_errors +
4669 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4670}
4671
4672static void bnx2x_drv_stats_update(struct bnx2x *bp)
4673{
4674 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4675 int i;
4676
4677 estats->driver_xoff = 0;
4678 estats->rx_err_discard_pkt = 0;
4679 estats->rx_skb_alloc_failed = 0;
4680 estats->hw_csum_err = 0;
4681 for_each_queue(bp, i) {
4682 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4683
4684 estats->driver_xoff += qstats->driver_xoff;
4685 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4686 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4687 estats->hw_csum_err += qstats->hw_csum_err;
4688 }
4689}
4690
4691static void bnx2x_stats_update(struct bnx2x *bp)
4692{
4693 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4694
4695 if (*stats_comp != DMAE_COMP_VAL)
4696 return;
4697
4698 if (bp->port.pmf)
4699 bnx2x_hw_stats_update(bp);
4700
4701 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4702 BNX2X_ERR("storm stats were not updated for 3 times\n");
4703 bnx2x_panic();
4704 return;
4705 }
4706
4707 bnx2x_net_stats_update(bp);
4708 bnx2x_drv_stats_update(bp);
4709
4710 if (netif_msg_timer(bp)) {
4711 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4712 int i;
4713
4714 printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
4715 bp->dev->name,
4716 estats->brb_drop_lo, estats->brb_truncate_lo);
4717
4718 for_each_queue(bp, i) {
4719 struct bnx2x_fastpath *fp = &bp->fp[i];
4720 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4721
4722 printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
4723 " rx pkt(%lu) rx calls(%lu %lu)\n",
4724 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4725 fp->rx_comp_cons),
4726 le16_to_cpu(*fp->rx_cons_sb),
4727 bnx2x_hilo(&qstats->
4728 total_unicast_packets_received_hi),
4729 fp->rx_calls, fp->rx_pkt);
4730 }
4731
4732 for_each_queue(bp, i) {
4733 struct bnx2x_fastpath *fp = &bp->fp[i];
4734 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4735 struct netdev_queue *txq =
4736 netdev_get_tx_queue(bp->dev, i);
4737
4738 printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
4739 " tx pkt(%lu) tx calls (%lu)"
4740 " %s (Xoff events %u)\n",
4741 fp->name, bnx2x_tx_avail(fp),
4742 le16_to_cpu(*fp->tx_cons_sb),
4743 bnx2x_hilo(&qstats->
4744 total_unicast_packets_transmitted_hi),
4745 fp->tx_pkt,
4746 (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4747 qstats->driver_xoff);
4748 }
4749 }
4750
4751 bnx2x_hw_stats_post(bp);
4752 bnx2x_storm_stats_post(bp);
4753}
4754
4755static void bnx2x_port_stats_stop(struct bnx2x *bp)
4756{
4757 struct dmae_command *dmae;
4758 u32 opcode;
4759 int loader_idx = PMF_DMAE_C(bp);
4760 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4761
4762 bp->executer_idx = 0;
4763
4764 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4765 DMAE_CMD_C_ENABLE |
4766 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4767#ifdef __BIG_ENDIAN
4768 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4769#else
4770 DMAE_CMD_ENDIANITY_DW_SWAP |
4771#endif
4772 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4773 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4774
4775 if (bp->port.port_stx) {
4776
4777 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4778 if (bp->func_stx)
4779 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4780 else
4781 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4782 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4783 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4784 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4785 dmae->dst_addr_hi = 0;
4786 dmae->len = sizeof(struct host_port_stats) >> 2;
4787 if (bp->func_stx) {
4788 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4789 dmae->comp_addr_hi = 0;
4790 dmae->comp_val = 1;
4791 } else {
4792 dmae->comp_addr_lo =
4793 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4794 dmae->comp_addr_hi =
4795 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4796 dmae->comp_val = DMAE_COMP_VAL;
4797
4798 *stats_comp = 0;
4799 }
4800 }
4801
4802 if (bp->func_stx) {
4803
4804 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4805 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4806 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4807 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4808 dmae->dst_addr_lo = bp->func_stx >> 2;
4809 dmae->dst_addr_hi = 0;
4810 dmae->len = sizeof(struct host_func_stats) >> 2;
4811 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4812 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4813 dmae->comp_val = DMAE_COMP_VAL;
4814
4815 *stats_comp = 0;
4816 }
4817}
4818
4819static void bnx2x_stats_stop(struct bnx2x *bp)
4820{
4821 int update = 0;
4822
4823 bnx2x_stats_comp(bp);
4824
4825 if (bp->port.pmf)
4826 update = (bnx2x_hw_stats_update(bp) == 0);
4827
4828 update |= (bnx2x_storm_stats_update(bp) == 0);
4829
4830 if (update) {
4831 bnx2x_net_stats_update(bp);
4832
4833 if (bp->port.pmf)
4834 bnx2x_port_stats_stop(bp);
4835
4836 bnx2x_hw_stats_post(bp);
4837 bnx2x_stats_comp(bp);
4838 }
4839}
4840
4841static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4842{
4843}
4844
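/* Statistics state machine: indexed by [current state][event].  Each entry
 * names the handler to run and the state to move to.  The two states are
 * STATS_STATE_DISABLED and STATS_STATE_ENABLED; the four events are PMF,
 * LINK_UP, UPDATE and STOP.  bnx2x_stats_handle() below does the dispatch.
 */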
4845static const struct {
4846 void (*action)(struct bnx2x *bp);
4847 enum bnx2x_stats_state next_state;
4848} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4849/* state event */
4850{
4851/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4852/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4853/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4854/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4855},
4856{
4857/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4858/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4859/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4860/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4861}
4862};
4863
4864static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4865{
4866 enum bnx2x_stats_state state = bp->stats_state;
4867
4868 if (unlikely(bp->panic))
4869 return;
4870
4871 bnx2x_stats_stm[state][event].action(bp);
4872 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4873
4874 /* Make sure the state has been "changed" */
4875 smp_wmb();
4876
4877 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4878 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4879 state, event, bp->stats_state);
4880}
4881
4882static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4883{
4884 struct dmae_command *dmae;
4885 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4886
4887 /* sanity */
4888 if (!bp->port.pmf || !bp->port.port_stx) {
4889 BNX2X_ERR("BUG!\n");
4890 return;
4891 }
4892
4893 bp->executer_idx = 0;
4894
4895 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4896 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4897 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4898 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4899#ifdef __BIG_ENDIAN
4900 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4901#else
4902 DMAE_CMD_ENDIANITY_DW_SWAP |
4903#endif
4904 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4905 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4906 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4907 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4908 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4909 dmae->dst_addr_hi = 0;
4910 dmae->len = sizeof(struct host_port_stats) >> 2;
4911 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4912 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4913 dmae->comp_val = DMAE_COMP_VAL;
4914
4915 *stats_comp = 0;
4916 bnx2x_hw_stats_post(bp);
4917 bnx2x_stats_comp(bp);
4918}
4919
4920static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4921{
4922 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4923 int port = BP_PORT(bp);
4924 int func;
4925 u32 func_stx;
4926
4927 /* sanity */
4928 if (!bp->port.pmf || !bp->func_stx) {
4929 BNX2X_ERR("BUG!\n");
4930 return;
4931 }
4932
4933 /* save our func_stx */
4934 func_stx = bp->func_stx;
4935
4936 for (vn = VN_0; vn < vn_max; vn++) {
4937 func = 2*vn + port;
4938
4939 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4940 bnx2x_func_stats_init(bp);
4941 bnx2x_hw_stats_post(bp);
4942 bnx2x_stats_comp(bp);
4943 }
4944
4945 /* restore our func_stx */
4946 bp->func_stx = func_stx;
4947}
4948
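/* DMAE the firmware's current function statistics block (at bp->func_stx)
 * into func_stats_base; bnx2x_storm_stats_update() uses that copy as the
 * starting point when accumulating the per-queue storm statistics.
 */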
4949static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4950{
4951 struct dmae_command *dmae = &bp->stats_dmae;
4952 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4953
4954 /* sanity */
4955 if (!bp->func_stx) {
4956 BNX2X_ERR("BUG!\n");
4957 return;
4958 }
4959
4960 bp->executer_idx = 0;
4961 memset(dmae, 0, sizeof(struct dmae_command));
4962
4963 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4964 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4965 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4966#ifdef __BIG_ENDIAN
4967 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4968#else
4969 DMAE_CMD_ENDIANITY_DW_SWAP |
4970#endif
4971 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4972 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4973 dmae->src_addr_lo = bp->func_stx >> 2;
4974 dmae->src_addr_hi = 0;
4975 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4976 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4977 dmae->len = sizeof(struct host_func_stats) >> 2;
4978 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4979 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4980 dmae->comp_val = DMAE_COMP_VAL;
4981
4982 *stats_comp = 0;
4983 bnx2x_hw_stats_post(bp);
4984 bnx2x_stats_comp(bp);
4985}
4986
4987static void bnx2x_stats_init(struct bnx2x *bp)
4988{
4989 int port = BP_PORT(bp);
4990 int func = BP_FUNC(bp);
4991 int i;
4992
4993 bp->stats_pending = 0;
4994 bp->executer_idx = 0;
4995 bp->stats_counter = 0;
4996
4997 /* port and func stats for management */
4998 if (!BP_NOMCP(bp)) {
4999 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
5000 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5001
5002 } else {
5003 bp->port.port_stx = 0;
5004 bp->func_stx = 0;
5005 }
5006 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
5007 bp->port.port_stx, bp->func_stx);
5008
5009 /* port stats */
5010 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
5011 bp->port.old_nig_stats.brb_discard =
5012 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
5013 bp->port.old_nig_stats.brb_truncate =
5014 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
5015 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
5016 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
5017 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
5018 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
5019
5020 /* function stats */
5021 for_each_queue(bp, i) {
5022 struct bnx2x_fastpath *fp = &bp->fp[i];
5023
5024 memset(&fp->old_tclient, 0,
5025 sizeof(struct tstorm_per_client_stats));
5026 memset(&fp->old_uclient, 0,
5027 sizeof(struct ustorm_per_client_stats));
5028 memset(&fp->old_xclient, 0,
5029 sizeof(struct xstorm_per_client_stats));
5030 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5031 }
5032
5033 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5034 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5035
5036 bp->stats_state = STATS_STATE_DISABLED;
5037
5038 if (bp->port.pmf) {
5039 if (bp->port.port_stx)
5040 bnx2x_port_stats_base_init(bp);
5041
5042 if (bp->func_stx)
5043 bnx2x_func_stats_base_init(bp);
5044
5045 } else if (bp->func_stx)
5046 bnx2x_func_stats_base_update(bp);
5047}
5048
5049static void bnx2x_timer(unsigned long data)
5050{
5051 struct bnx2x *bp = (struct bnx2x *) data;
5052
5053 if (!netif_running(bp->dev))
5054 return;
5055
5056 if (atomic_read(&bp->intr_sem) != 0)
5057 goto timer_restart;
5058
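	/* In polling mode (poll set), service the TX and RX rings of
	 * queue 0 directly from the timer. */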
5059 if (poll) {
5060 struct bnx2x_fastpath *fp = &bp->fp[0];
5061 int rc;
5062
5063 bnx2x_tx_int(fp);
5064 rc = bnx2x_rx_int(fp, 1000);
5065 }
5066
5067 if (!BP_NOMCP(bp)) {
5068 int func = BP_FUNC(bp);
5069 u32 drv_pulse;
5070 u32 mcp_pulse;
5071
5072 ++bp->fw_drv_pulse_wr_seq;
5073 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5074 /* TBD - add SYSTEM_TIME */
5075 drv_pulse = bp->fw_drv_pulse_wr_seq;
5076 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
5077
5078 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5079 MCP_PULSE_SEQ_MASK);
5080 /* The delta between driver pulse and mcp response
5081 * should be 1 (before mcp response) or 0 (after mcp response)
5082 */
5083 if ((drv_pulse != mcp_pulse) &&
5084 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5085 /* someone lost a heartbeat... */
5086 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5087 drv_pulse, mcp_pulse);
5088 }
5089 }
5090
5091 if (bp->state == BNX2X_STATE_OPEN)
5092 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5093
5094timer_restart:
5095 mod_timer(&bp->timer, jiffies + bp->current_interval);
5096}
5097
5098/* end of Statistics */
5099
5100/* nic init */
5101
5102/*
5103 * nic init service functions
5104 */
5105
5106static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5107{
5108 int port = BP_PORT(bp);
5109
5110 /* "CSTORM" */
5111 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5112 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5113 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5114 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5115 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5116 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5117}
5118
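/* Program a fastpath status block: the USTORM and CSTORM sections get the
 * host address of the block, and every index is initially marked disabled
 * (bnx2x_update_coalesce() later re-enables the RX and TX CQ indices).
 */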
5119static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5120 dma_addr_t mapping, int sb_id)
5121{
5122 int port = BP_PORT(bp);
5123 int func = BP_FUNC(bp);
5124 int index;
5125 u64 section;
5126
5127 /* USTORM */
5128 section = ((u64)mapping) + offsetof(struct host_status_block,
5129 u_status_block);
5130 sb->u_status_block.status_block_id = sb_id;
5131
5132 REG_WR(bp, BAR_CSTRORM_INTMEM +
5133 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5134 REG_WR(bp, BAR_CSTRORM_INTMEM +
5135 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
5136 U64_HI(section));
5137 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5138 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5139
5140 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5141 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5142 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5143
5144 /* CSTORM */
5145 section = ((u64)mapping) + offsetof(struct host_status_block,
5146 c_status_block);
5147 sb->c_status_block.status_block_id = sb_id;
5148
5149 REG_WR(bp, BAR_CSTRORM_INTMEM +
5150 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
5151 REG_WR(bp, BAR_CSTRORM_INTMEM +
5152 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
5153 U64_HI(section));
5154 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
5155 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5156
5157 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5158 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5159 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5160
5161 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5162}
5163
5164static void bnx2x_zero_def_sb(struct bnx2x *bp)
5165{
5166 int func = BP_FUNC(bp);
5167
5168 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5169 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5170 sizeof(struct tstorm_def_status_block)/4);
5171 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5172 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5173 sizeof(struct cstorm_def_status_block_u)/4);
5174 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5175 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5176 sizeof(struct cstorm_def_status_block_c)/4);
5177 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5178 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5179 sizeof(struct xstorm_def_status_block)/4);
5180}
5181
5182static void bnx2x_init_def_sb(struct bnx2x *bp,
5183 struct host_def_status_block *def_sb,
5184 dma_addr_t mapping, int sb_id)
5185{
5186 int port = BP_PORT(bp);
5187 int func = BP_FUNC(bp);
5188 int index, val, reg_offset;
5189 u64 section;
5190
5191 /* ATTN */
5192 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5193 atten_status_block);
5194 def_sb->atten_status_block.status_block_id = sb_id;
5195
5196 bp->attn_state = 0;
5197
5198 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5199 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5200
5201 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5202 bp->attn_group[index].sig[0] = REG_RD(bp,
5203 reg_offset + 0x10*index);
5204 bp->attn_group[index].sig[1] = REG_RD(bp,
5205 reg_offset + 0x4 + 0x10*index);
5206 bp->attn_group[index].sig[2] = REG_RD(bp,
5207 reg_offset + 0x8 + 0x10*index);
5208 bp->attn_group[index].sig[3] = REG_RD(bp,
5209 reg_offset + 0xc + 0x10*index);
5210 }
5211
5212 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5213 HC_REG_ATTN_MSG0_ADDR_L);
5214
5215 REG_WR(bp, reg_offset, U64_LO(section));
5216 REG_WR(bp, reg_offset + 4, U64_HI(section));
5217
5218 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5219
5220 val = REG_RD(bp, reg_offset);
5221 val |= sb_id;
5222 REG_WR(bp, reg_offset, val);
5223
5224 /* USTORM */
5225 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5226 u_def_status_block);
5227 def_sb->u_def_status_block.status_block_id = sb_id;
5228
5229 REG_WR(bp, BAR_CSTRORM_INTMEM +
5230 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5231 REG_WR(bp, BAR_CSTRORM_INTMEM +
5232 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
5233 U64_HI(section));
5234 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5235 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5236
5237 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5238 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5239 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5240
5241 /* CSTORM */
5242 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5243 c_def_status_block);
5244 def_sb->c_def_status_block.status_block_id = sb_id;
5245
5246 REG_WR(bp, BAR_CSTRORM_INTMEM +
5247 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
5248 REG_WR(bp, BAR_CSTRORM_INTMEM +
5249 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
5250 U64_HI(section));
5251 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
5252 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5253
5254 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5255 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5256 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5257
5258 /* TSTORM */
5259 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5260 t_def_status_block);
5261 def_sb->t_def_status_block.status_block_id = sb_id;
5262
5263 REG_WR(bp, BAR_TSTRORM_INTMEM +
5264 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5265 REG_WR(bp, BAR_TSTRORM_INTMEM +
5266 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5267 U64_HI(section));
5268 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
5269 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5270
5271 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5272 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5273 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5274
5275 /* XSTORM */
5276 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5277 x_def_status_block);
5278 def_sb->x_def_status_block.status_block_id = sb_id;
5279
5280 REG_WR(bp, BAR_XSTRORM_INTMEM +
5281 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5282 REG_WR(bp, BAR_XSTRORM_INTMEM +
5283 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5284 U64_HI(section));
5285 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
5286 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5287
5288 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5289 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5290 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5291
5292 bp->stats_pending = 0;
5293 bp->set_mac_pending = 0;
5294
5295 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5296}
5297
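/* Program the host coalescing timeout for each status block index: the
 * value written is rx_ticks (or tx_ticks) divided by 4 * BNX2X_BTR, and the
 * index is disabled when that value is zero.
 */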
5298static void bnx2x_update_coalesce(struct bnx2x *bp)
5299{
5300 int port = BP_PORT(bp);
5301 int i;
5302
5303 for_each_queue(bp, i) {
5304 int sb_id = bp->fp[i].sb_id;
5305
5306 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5307 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5308 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5309 U_SB_ETH_RX_CQ_INDEX),
5310 bp->rx_ticks/(4 * BNX2X_BTR));
5311 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5312 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5313 U_SB_ETH_RX_CQ_INDEX),
5314 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5315
5316 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5317 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5318 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5319 C_SB_ETH_TX_CQ_INDEX),
5320 bp->tx_ticks/(4 * BNX2X_BTR));
5321 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5322 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5323 C_SB_ETH_TX_CQ_INDEX),
5324 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5325 }
5326}
5327
5328static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5329 struct bnx2x_fastpath *fp, int last)
5330{
5331 int i;
5332
5333 for (i = 0; i < last; i++) {
5334 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5335 struct sk_buff *skb = rx_buf->skb;
5336
5337 if (skb == NULL) {
5338 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5339 continue;
5340 }
5341
5342 if (fp->tpa_state[i] == BNX2X_TPA_START)
5343 dma_unmap_single(&bp->pdev->dev,
5344 dma_unmap_addr(rx_buf, mapping),
5345 bp->rx_buf_size, DMA_FROM_DEVICE);
5346
5347 dev_kfree_skb(skb);
5348 rx_buf->skb = NULL;
5349 }
5350}
5351
5352static void bnx2x_init_rx_rings(struct bnx2x *bp)
5353{
5354 int func = BP_FUNC(bp);
5355 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5356 ETH_MAX_AGGREGATION_QUEUES_E1H;
5357 u16 ring_prod, cqe_ring_prod;
5358 int i, j;
5359
5360 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5361 DP(NETIF_MSG_IFUP,
5362 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5363
5364 if (bp->flags & TPA_ENABLE_FLAG) {
5365
5366 for_each_queue(bp, j) {
5367 struct bnx2x_fastpath *fp = &bp->fp[j];
5368
5369 for (i = 0; i < max_agg_queues; i++) {
5370 fp->tpa_pool[i].skb =
5371 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5372 if (!fp->tpa_pool[i].skb) {
5373 BNX2X_ERR("Failed to allocate TPA "
5374 "skb pool for queue[%d] - "
5375 "disabling TPA on this "
5376 "queue!\n", j);
5377 bnx2x_free_tpa_pool(bp, fp, i);
5378 fp->disable_tpa = 1;
5379 break;
5380 }
5381 dma_unmap_addr_set((struct sw_rx_bd *)
5382 &bp->fp->tpa_pool[i],
5383 mapping, 0);
5384 fp->tpa_state[i] = BNX2X_TPA_STOP;
5385 }
5386 }
5387 }
5388
5389 for_each_queue(bp, j) {
5390 struct bnx2x_fastpath *fp = &bp->fp[j];
5391
5392 fp->rx_bd_cons = 0;
5393 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5394 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5395
5396 /* "next page" elements initialization */
5397 /* SGE ring */
5398 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5399 struct eth_rx_sge *sge;
5400
5401 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5402 sge->addr_hi =
5403 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5404 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5405 sge->addr_lo =
5406 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5407 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5408 }
5409
5410 bnx2x_init_sge_ring_bit_mask(fp);
5411
5412 /* RX BD ring */
5413 for (i = 1; i <= NUM_RX_RINGS; i++) {
5414 struct eth_rx_bd *rx_bd;
5415
5416 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5417 rx_bd->addr_hi =
5418 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5419 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5420 rx_bd->addr_lo =
5421 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5422 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5423 }
5424
5425 /* CQ ring */
5426 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5427 struct eth_rx_cqe_next_page *nextpg;
5428
5429 nextpg = (struct eth_rx_cqe_next_page *)
5430 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5431 nextpg->addr_hi =
5432 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5433 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5434 nextpg->addr_lo =
5435 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5436 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5437 }
5438
5439 /* Allocate SGEs and initialize the ring elements */
5440 for (i = 0, ring_prod = 0;
5441 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5442
5443 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5444 BNX2X_ERR("was only able to allocate "
5445 "%d rx sges\n", i);
5446 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5447 /* Cleanup already allocated elements */
5448 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5449 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5450 fp->disable_tpa = 1;
5451 ring_prod = 0;
5452 break;
5453 }
5454 ring_prod = NEXT_SGE_IDX(ring_prod);
5455 }
5456 fp->rx_sge_prod = ring_prod;
5457
5458 /* Allocate BDs and initialize BD ring */
5459 fp->rx_comp_cons = 0;
5460 cqe_ring_prod = ring_prod = 0;
5461 for (i = 0; i < bp->rx_ring_size; i++) {
5462 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5463 BNX2X_ERR("was only able to allocate "
5464 "%d rx skbs on queue[%d]\n", i, j);
5465 fp->eth_q_stats.rx_skb_alloc_failed++;
5466 break;
5467 }
5468 ring_prod = NEXT_RX_IDX(ring_prod);
5469 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5470 WARN_ON(ring_prod <= i);
5471 }
5472
5473 fp->rx_bd_prod = ring_prod;
5474 /* must not have more available CQEs than BDs */
5475 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5476 cqe_ring_prod);
5477 fp->rx_pkt = fp->rx_calls = 0;
5478
5479		/* Warning!
5480		 * This will generate an interrupt (to the TSTORM);
5481		 * it must only be done after the chip is initialized.
5482		 */
5483 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5484 fp->rx_sge_prod);
5485 if (j != 0)
5486 continue;
5487
5488 REG_WR(bp, BAR_USTRORM_INTMEM +
5489 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5490 U64_LO(fp->rx_comp_mapping));
5491 REG_WR(bp, BAR_USTRORM_INTMEM +
5492 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5493 U64_HI(fp->rx_comp_mapping));
5494 }
5495}
5496
5497static void bnx2x_init_tx_ring(struct bnx2x *bp)
5498{
5499 int i, j;
5500
5501 for_each_queue(bp, j) {
5502 struct bnx2x_fastpath *fp = &bp->fp[j];
5503
5504 for (i = 1; i <= NUM_TX_RINGS; i++) {
5505 struct eth_tx_next_bd *tx_next_bd =
5506 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5507
5508 tx_next_bd->addr_hi =
5509 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5510 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5511 tx_next_bd->addr_lo =
5512 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5513 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5514 }
5515
5516 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5517 fp->tx_db.data.zero_fill1 = 0;
5518 fp->tx_db.data.prod = 0;
5519
5520 fp->tx_pkt_prod = 0;
5521 fp->tx_pkt_cons = 0;
5522 fp->tx_bd_prod = 0;
5523 fp->tx_bd_cons = 0;
5524 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5525 fp->tx_pkt = 0;
5526 }
5527}
5528
5529static void bnx2x_init_sp_ring(struct bnx2x *bp)
5530{
5531 int func = BP_FUNC(bp);
5532
5533 spin_lock_init(&bp->spq_lock);
5534
5535 bp->spq_left = MAX_SPQ_PENDING;
5536 bp->spq_prod_idx = 0;
5537 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5538 bp->spq_prod_bd = bp->spq;
5539 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5540
5541 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5542 U64_LO(bp->spq_mapping));
5543 REG_WR(bp,
5544 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5545 U64_HI(bp->spq_mapping));
5546
5547 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5548 bp->spq_prod_idx);
5549}
5550
5551static void bnx2x_init_context(struct bnx2x *bp)
5552{
5553 int i;
5554
5555 /* Rx */
5556 for_each_queue(bp, i) {
5557 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5558 struct bnx2x_fastpath *fp = &bp->fp[i];
5559 u8 cl_id = fp->cl_id;
5560
5561 context->ustorm_st_context.common.sb_index_numbers =
5562 BNX2X_RX_SB_INDEX_NUM;
5563 context->ustorm_st_context.common.clientId = cl_id;
5564 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5565 context->ustorm_st_context.common.flags =
5566 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5567 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5568 context->ustorm_st_context.common.statistics_counter_id =
5569 cl_id;
5570 context->ustorm_st_context.common.mc_alignment_log_size =
5571 BNX2X_RX_ALIGN_SHIFT;
5572 context->ustorm_st_context.common.bd_buff_size =
5573 bp->rx_buf_size;
5574 context->ustorm_st_context.common.bd_page_base_hi =
5575 U64_HI(fp->rx_desc_mapping);
5576 context->ustorm_st_context.common.bd_page_base_lo =
5577 U64_LO(fp->rx_desc_mapping);
5578 if (!fp->disable_tpa) {
5579 context->ustorm_st_context.common.flags |=
5580 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5581 context->ustorm_st_context.common.sge_buff_size =
5582 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5583 0xffff);
5584 context->ustorm_st_context.common.sge_page_base_hi =
5585 U64_HI(fp->rx_sge_mapping);
5586 context->ustorm_st_context.common.sge_page_base_lo =
5587 U64_LO(fp->rx_sge_mapping);
5588
5589 context->ustorm_st_context.common.max_sges_for_packet =
5590 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5591 context->ustorm_st_context.common.max_sges_for_packet =
5592 ((context->ustorm_st_context.common.
5593 max_sges_for_packet + PAGES_PER_SGE - 1) &
5594 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5595 }
5596
5597 context->ustorm_ag_context.cdu_usage =
5598 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5599 CDU_REGION_NUMBER_UCM_AG,
5600 ETH_CONNECTION_TYPE);
5601
5602 context->xstorm_ag_context.cdu_reserved =
5603 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5604 CDU_REGION_NUMBER_XCM_AG,
5605 ETH_CONNECTION_TYPE);
5606 }
5607
5608 /* Tx */
5609 for_each_queue(bp, i) {
5610 struct bnx2x_fastpath *fp = &bp->fp[i];
5611 struct eth_context *context =
5612 bnx2x_sp(bp, context[i].eth);
5613
5614 context->cstorm_st_context.sb_index_number =
5615 C_SB_ETH_TX_CQ_INDEX;
5616 context->cstorm_st_context.status_block_id = fp->sb_id;
5617
5618 context->xstorm_st_context.tx_bd_page_base_hi =
5619 U64_HI(fp->tx_desc_mapping);
5620 context->xstorm_st_context.tx_bd_page_base_lo =
5621 U64_LO(fp->tx_desc_mapping);
5622 context->xstorm_st_context.statistics_data = (fp->cl_id |
5623 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5624 }
5625}
5626
5627static void bnx2x_init_ind_table(struct bnx2x *bp)
5628{
5629 int func = BP_FUNC(bp);
5630 int i;
5631
5632 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5633 return;
5634
5635 DP(NETIF_MSG_IFUP,
5636 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
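	/* Spread the entries round-robin over the RX queues' client IDs,
	 * starting from the leading client (fp[0]). */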
5637 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5638 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5639 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5640 bp->fp->cl_id + (i % bp->num_queues));
5641}
5642
5643static void bnx2x_set_client_config(struct bnx2x *bp)
5644{
5645 struct tstorm_eth_client_config tstorm_client = {0};
5646 int port = BP_PORT(bp);
5647 int i;
5648
5649 tstorm_client.mtu = bp->dev->mtu;
5650 tstorm_client.config_flags =
5651 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5652 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5653#ifdef BCM_VLAN
5654 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5655 tstorm_client.config_flags |=
5656 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5657 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5658 }
5659#endif
5660
5661 for_each_queue(bp, i) {
5662 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5663
5664 REG_WR(bp, BAR_TSTRORM_INTMEM +
5665 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5666 ((u32 *)&tstorm_client)[0]);
5667 REG_WR(bp, BAR_TSTRORM_INTMEM +
5668 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5669 ((u32 *)&tstorm_client)[1]);
5670 }
5671
5672 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5673 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5674}
5675
5676static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5677{
5678 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5679 int mode = bp->rx_mode;
5680 int mask = bp->rx_mode_cl_mask;
5681 int func = BP_FUNC(bp);
5682 int port = BP_PORT(bp);
5683 int i;
5684 /* All but management unicast packets should pass to the host as well */
5685 u32 llh_mask =
5686 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5687 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5688 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5689 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5690
5691 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5692
5693 switch (mode) {
5694 case BNX2X_RX_MODE_NONE: /* no Rx */
5695 tstorm_mac_filter.ucast_drop_all = mask;
5696 tstorm_mac_filter.mcast_drop_all = mask;
5697 tstorm_mac_filter.bcast_drop_all = mask;
5698 break;
5699
5700 case BNX2X_RX_MODE_NORMAL:
5701 tstorm_mac_filter.bcast_accept_all = mask;
5702 break;
5703
5704 case BNX2X_RX_MODE_ALLMULTI:
5705 tstorm_mac_filter.mcast_accept_all = mask;
5706 tstorm_mac_filter.bcast_accept_all = mask;
5707 break;
5708
5709 case BNX2X_RX_MODE_PROMISC:
5710 tstorm_mac_filter.ucast_accept_all = mask;
5711 tstorm_mac_filter.mcast_accept_all = mask;
5712 tstorm_mac_filter.bcast_accept_all = mask;
5713 /* pass management unicast packets as well */
5714 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5715 break;
5716
5717 default:
5718 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5719 break;
5720 }
5721
5722 REG_WR(bp,
5723 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5724 llh_mask);
5725
5726 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5727 REG_WR(bp, BAR_TSTRORM_INTMEM +
5728 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5729 ((u32 *)&tstorm_mac_filter)[i]);
5730
5731/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5732 ((u32 *)&tstorm_mac_filter)[i]); */
5733 }
5734
5735 if (mode != BNX2X_RX_MODE_NONE)
5736 bnx2x_set_client_config(bp);
5737}
5738
5739static void bnx2x_init_internal_common(struct bnx2x *bp)
5740{
5741 int i;
5742
5743 /* Zero this manually as its initialization is
5744 currently missing in the initTool */
5745 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5746 REG_WR(bp, BAR_USTRORM_INTMEM +
5747 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5748}
5749
5750static void bnx2x_init_internal_port(struct bnx2x *bp)
5751{
5752 int port = BP_PORT(bp);
5753
5754 REG_WR(bp,
5755 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5756 REG_WR(bp,
5757 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5758 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5759 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5760}
5761
5762static void bnx2x_init_internal_func(struct bnx2x *bp)
5763{
5764 struct tstorm_eth_function_common_config tstorm_config = {0};
5765 struct stats_indication_flags stats_flags = {0};
5766 int port = BP_PORT(bp);
5767 int func = BP_FUNC(bp);
5768 int i, j;
5769 u32 offset;
5770 u16 max_agg_size;
5771
5772 tstorm_config.config_flags = RSS_FLAGS(bp);
5773
5774 if (is_multi(bp))
5775 tstorm_config.rss_result_mask = MULTI_MASK;
5776
5777 /* Enable TPA if needed */
5778 if (bp->flags & TPA_ENABLE_FLAG)
5779 tstorm_config.config_flags |=
5780 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5781
5782 if (IS_E1HMF(bp))
5783 tstorm_config.config_flags |=
5784 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5785
5786 tstorm_config.leading_client_id = BP_L_ID(bp);
5787
5788 REG_WR(bp, BAR_TSTRORM_INTMEM +
5789 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5790 (*(u32 *)&tstorm_config));
5791
5792 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5793 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5794 bnx2x_set_storm_rx_mode(bp);
5795
5796 for_each_queue(bp, i) {
5797 u8 cl_id = bp->fp[i].cl_id;
5798
5799 /* reset xstorm per client statistics */
5800 offset = BAR_XSTRORM_INTMEM +
5801 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5802 for (j = 0;
5803 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5804 REG_WR(bp, offset + j*4, 0);
5805
5806 /* reset tstorm per client statistics */
5807 offset = BAR_TSTRORM_INTMEM +
5808 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5809 for (j = 0;
5810 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5811 REG_WR(bp, offset + j*4, 0);
5812
5813 /* reset ustorm per client statistics */
5814 offset = BAR_USTRORM_INTMEM +
5815 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5816 for (j = 0;
5817 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5818 REG_WR(bp, offset + j*4, 0);
5819 }
5820
5821 /* Init statistics related context */
5822 stats_flags.collect_eth = 1;
5823
5824 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5825 ((u32 *)&stats_flags)[0]);
5826 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5827 ((u32 *)&stats_flags)[1]);
5828
5829 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5830 ((u32 *)&stats_flags)[0]);
5831 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5832 ((u32 *)&stats_flags)[1]);
5833
5834 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5835 ((u32 *)&stats_flags)[0]);
5836 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5837 ((u32 *)&stats_flags)[1]);
5838
5839 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5840 ((u32 *)&stats_flags)[0]);
5841 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5842 ((u32 *)&stats_flags)[1]);
5843
5844 REG_WR(bp, BAR_XSTRORM_INTMEM +
5845 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5846 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5847 REG_WR(bp, BAR_XSTRORM_INTMEM +
5848 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5849 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5850
5851 REG_WR(bp, BAR_TSTRORM_INTMEM +
5852 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5853 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5854 REG_WR(bp, BAR_TSTRORM_INTMEM +
5855 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5856 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5857
5858 REG_WR(bp, BAR_USTRORM_INTMEM +
5859 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5860 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5861 REG_WR(bp, BAR_USTRORM_INTMEM +
5862 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5863 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5864
5865 if (CHIP_IS_E1H(bp)) {
5866 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5867 IS_E1HMF(bp));
5868 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5869 IS_E1HMF(bp));
5870 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5871 IS_E1HMF(bp));
5872 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5873 IS_E1HMF(bp));
5874
5875 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5876 bp->e1hov);
5877 }
5878
5879 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5880 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5881 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
5882 for_each_queue(bp, i) {
5883 struct bnx2x_fastpath *fp = &bp->fp[i];
5884
5885 REG_WR(bp, BAR_USTRORM_INTMEM +
5886 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5887 U64_LO(fp->rx_comp_mapping));
5888 REG_WR(bp, BAR_USTRORM_INTMEM +
5889 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5890 U64_HI(fp->rx_comp_mapping));
5891
5892 /* Next page */
5893 REG_WR(bp, BAR_USTRORM_INTMEM +
5894 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5895 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5896 REG_WR(bp, BAR_USTRORM_INTMEM +
5897 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5898 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5899
5900 REG_WR16(bp, BAR_USTRORM_INTMEM +
5901 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5902 max_agg_size);
5903 }
5904
5905 /* dropless flow control */
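	/* For E1H, per-client low/high pause thresholds (in ring entries) for
	 * the BD, CQE and SGE rings are written into USTORM below; non-zero
	 * SGE thresholds are used only when TPA is enabled for the client.
	 */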
5906 if (CHIP_IS_E1H(bp)) {
5907 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5908
5909 rx_pause.bd_thr_low = 250;
5910 rx_pause.cqe_thr_low = 250;
5911 rx_pause.cos = 1;
5912 rx_pause.sge_thr_low = 0;
5913 rx_pause.bd_thr_high = 350;
5914 rx_pause.cqe_thr_high = 350;
5915 rx_pause.sge_thr_high = 0;
5916
5917 for_each_queue(bp, i) {
5918 struct bnx2x_fastpath *fp = &bp->fp[i];
5919
5920 if (!fp->disable_tpa) {
5921 rx_pause.sge_thr_low = 150;
5922 rx_pause.sge_thr_high = 250;
5923 }
5924
5925
5926 offset = BAR_USTRORM_INTMEM +
5927 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5928 fp->cl_id);
5929 for (j = 0;
5930 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5931 j++)
5932 REG_WR(bp, offset + j*4,
5933 ((u32 *)&rx_pause)[j]);
5934 }
5935 }
5936
5937 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5938
5939 /* Init rate shaping and fairness contexts */
5940 if (IS_E1HMF(bp)) {
5941 int vn;
5942
5943		/* During init there is no active link.
5944		   Until link is up, set the link rate to 10Gbps */
5945 bp->link_vars.line_speed = SPEED_10000;
5946 bnx2x_init_port_minmax(bp);
5947
5948 if (!BP_NOMCP(bp))
5949 bp->mf_config =
5950 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5951 bnx2x_calc_vn_weight_sum(bp);
5952
5953 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5954 bnx2x_init_vn_minmax(bp, 2*vn + port);
5955
5956 /* Enable rate shaping and fairness */
5957 bp->cmng.flags.cmng_enables |=
5958 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5959
5960 } else {
5961 /* rate shaping and fairness are disabled */
5962 DP(NETIF_MSG_IFUP,
5963 "single function mode minmax will be disabled\n");
5964 }
5965
5966
5967 /* Store cmng structures to internal memory */
5968 if (bp->port.pmf)
5969 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5970 REG_WR(bp, BAR_XSTRORM_INTMEM +
5971 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5972 ((u32 *)(&bp->cmng))[i]);
5973}
5974
5975static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5976{
5977 switch (load_code) {
5978 case FW_MSG_CODE_DRV_LOAD_COMMON:
5979 bnx2x_init_internal_common(bp);
5980 /* no break */
5981
5982 case FW_MSG_CODE_DRV_LOAD_PORT:
5983 bnx2x_init_internal_port(bp);
5984 /* no break */
5985
5986 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5987 bnx2x_init_internal_func(bp);
5988 break;
5989
5990 default:
5991 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5992 break;
5993 }
5994}
5995
5996static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5997{
5998 int i;
5999
6000 for_each_queue(bp, i) {
6001 struct bnx2x_fastpath *fp = &bp->fp[i];
6002
6003 fp->bp = bp;
6004 fp->state = BNX2X_FP_STATE_CLOSED;
6005 fp->index = i;
6006 fp->cl_id = BP_L_ID(bp) + i;
6007#ifdef BCM_CNIC
6008 fp->sb_id = fp->cl_id + 1;
6009#else
6010 fp->sb_id = fp->cl_id;
6011#endif
6012 DP(NETIF_MSG_IFUP,
6013 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
6014 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
6015 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
6016 fp->sb_id);
6017 bnx2x_update_fpsb_idx(fp);
6018 }
6019
6020 /* ensure status block indices were read */
6021 rmb();
6022
6023
6024 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6025 DEF_SB_ID);
6026 bnx2x_update_dsb_idx(bp);
6027 bnx2x_update_coalesce(bp);
6028 bnx2x_init_rx_rings(bp);
6029 bnx2x_init_tx_ring(bp);
6030 bnx2x_init_sp_ring(bp);
6031 bnx2x_init_context(bp);
6032 bnx2x_init_internal(bp, load_code);
6033 bnx2x_init_ind_table(bp);
6034 bnx2x_stats_init(bp);
6035
6036 /* At this point, we are ready for interrupts */
6037 atomic_set(&bp->intr_sem, 0);
6038
6039 /* flush all before enabling interrupts */
6040 mb();
6041 mmiowb();
6042
6043 bnx2x_int_enable(bp);
6044
6045 /* Check for SPIO5 */
6046 bnx2x_attn_int_deasserted0(bp,
6047 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6048 AEU_INPUTS_ATTN_BITS_SPIO5);
6049}
6050
6051/* end of nic init */
6052
6053/*
6054 * gzip service functions
6055 */
6056
6057static int bnx2x_gunzip_init(struct bnx2x *bp)
6058{
6059 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6060 &bp->gunzip_mapping, GFP_KERNEL);
6061 if (bp->gunzip_buf == NULL)
6062 goto gunzip_nomem1;
6063
6064 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6065 if (bp->strm == NULL)
6066 goto gunzip_nomem2;
6067
6068 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6069 GFP_KERNEL);
6070 if (bp->strm->workspace == NULL)
6071 goto gunzip_nomem3;
6072
6073 return 0;
6074
6075gunzip_nomem3:
6076 kfree(bp->strm);
6077 bp->strm = NULL;
6078
6079gunzip_nomem2:
6080 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6081 bp->gunzip_mapping);
6082 bp->gunzip_buf = NULL;
6083
6084gunzip_nomem1:
6085 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6086 " un-compression\n");
6087 return -ENOMEM;
6088}
6089
6090static void bnx2x_gunzip_end(struct bnx2x *bp)
6091{
6092 kfree(bp->strm->workspace);
6093
6094 kfree(bp->strm);
6095 bp->strm = NULL;
6096
6097 if (bp->gunzip_buf) {
6098 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6099 bp->gunzip_mapping);
6100 bp->gunzip_buf = NULL;
6101 }
6102}
6103
6104static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6105{
6106 int n, rc;
6107
6108 /* check gzip header */
6109 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6110 BNX2X_ERR("Bad gzip header\n");
6111 return -EINVAL;
6112 }
6113
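	/* Skip the 10-byte gzip header; if the FNAME flag is set, also skip
	 * the NUL-terminated original file name that follows it.  The raw
	 * deflate stream is then inflated with -MAX_WBITS (no zlib header).
	 */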
6114 n = 10;
6115
6116#define FNAME 0x8
6117
6118 if (zbuf[3] & FNAME)
6119 while ((zbuf[n++] != 0) && (n < len));
6120
6121 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6122 bp->strm->avail_in = len - n;
6123 bp->strm->next_out = bp->gunzip_buf;
6124 bp->strm->avail_out = FW_BUF_SIZE;
6125
6126 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6127 if (rc != Z_OK)
6128 return rc;
6129
6130 rc = zlib_inflate(bp->strm, Z_FINISH);
6131 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6132 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6133 bp->strm->msg);
6134
6135 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6136 if (bp->gunzip_outlen & 0x3)
6137 netdev_err(bp->dev, "Firmware decompression error:"
6138 " gunzip_outlen (%d) not aligned\n",
6139 bp->gunzip_outlen);
6140 bp->gunzip_outlen >>= 2;
6141
6142 zlib_inflateEnd(bp->strm);
6143
6144 if (rc == Z_STREAM_END)
6145 return 0;
6146
6147 return rc;
6148}
6149
6150/* nic load/unload */
6151
6152/*
6153 * General service functions
6154 */
6155
6156/* send a NIG loopback debug packet */
6157static void bnx2x_lb_pckt(struct bnx2x *bp)
6158{
6159 u32 wb_write[3];
6160
6161 /* Ethernet source and destination addresses */
6162 wb_write[0] = 0x55555555;
6163 wb_write[1] = 0x55555555;
6164 wb_write[2] = 0x20; /* SOP */
6165 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6166
6167 /* NON-IP protocol */
6168 wb_write[0] = 0x09000000;
6169 wb_write[1] = 0x55555555;
6170 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
6171 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6172}
6173
6174/* some of the internal memories
6175 * are not directly readable from the driver,
6176 * so to test them we send debug packets
6177 */
6178static int bnx2x_int_mem_test(struct bnx2x *bp)
6179{
6180 int factor;
6181 int count, i;
6182 u32 val = 0;
6183
6184 if (CHIP_REV_IS_FPGA(bp))
6185 factor = 120;
6186 else if (CHIP_REV_IS_EMUL(bp))
6187 factor = 200;
6188 else
6189 factor = 1;
6190
6191 DP(NETIF_MSG_HW, "start part1\n");
6192
6193 /* Disable inputs of parser neighbor blocks */
6194 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6195 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6196 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6197 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6198
6199 /* Write 0 to parser credits for CFC search request */
6200 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6201
6202 /* send Ethernet packet */
6203 bnx2x_lb_pckt(bp);
6204
6205	/* TODO: do I reset the NIG statistic? */
6206 /* Wait until NIG register shows 1 packet of size 0x10 */
6207 count = 1000 * factor;
6208 while (count) {
6209
6210 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6211 val = *bnx2x_sp(bp, wb_data[0]);
6212 if (val == 0x10)
6213 break;
6214
6215 msleep(10);
6216 count--;
6217 }
6218 if (val != 0x10) {
6219 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6220 return -1;
6221 }
6222
6223 /* Wait until PRS register shows 1 packet */
6224 count = 1000 * factor;
6225 while (count) {
6226 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6227 if (val == 1)
6228 break;
6229
6230 msleep(10);
6231 count--;
6232 }
6233 if (val != 0x1) {
6234 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6235 return -2;
6236 }
6237
6238 /* Reset and init BRB, PRS */
6239 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6240 msleep(50);
6241 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6242 msleep(50);
6243 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6244 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6245
6246 DP(NETIF_MSG_HW, "part2\n");
6247
6248 /* Disable inputs of parser neighbor blocks */
6249 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6250 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6251 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6252 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6253
6254 /* Write 0 to parser credits for CFC search request */
6255 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6256
6257 /* send 10 Ethernet packets */
6258 for (i = 0; i < 10; i++)
6259 bnx2x_lb_pckt(bp);
6260
6261	/* Wait until NIG register shows 10 + 1
6262	   packets, i.e. a total size of 11*0x10 = 0xb0 */
6263 count = 1000 * factor;
6264 while (count) {
6265
6266 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6267 val = *bnx2x_sp(bp, wb_data[0]);
6268 if (val == 0xb0)
6269 break;
6270
6271 msleep(10);
6272 count--;
6273 }
6274 if (val != 0xb0) {
6275 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6276 return -3;
6277 }
6278
6279 /* Wait until PRS register shows 2 packets */
6280 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6281 if (val != 2)
6282 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6283
6284 /* Write 1 to parser credits for CFC search request */
6285 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6286
6287 /* Wait until PRS register shows 3 packets */
6288 msleep(10 * factor);
6289 /* Wait until NIG register shows 1 packet of size 0x10 */
6290 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6291 if (val != 3)
6292 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6293
6294 /* clear NIG EOP FIFO */
6295 for (i = 0; i < 11; i++)
6296 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6297 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6298 if (val != 1) {
6299 BNX2X_ERR("clear of NIG failed\n");
6300 return -4;
6301 }
6302
6303 /* Reset and init BRB, PRS, NIG */
6304 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6305 msleep(50);
6306 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6307 msleep(50);
6308 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6309 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6310#ifndef BCM_CNIC
6311 /* set NIC mode */
6312 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6313#endif
6314
6315 /* Enable inputs of parser neighbor blocks */
6316 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6317 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6318 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6319 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6320
6321 DP(NETIF_MSG_HW, "done\n");
6322
6323 return 0; /* OK */
6324}
6325
6326static void enable_blocks_attention(struct bnx2x *bp)
6327{
6328 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6329 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6330 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6331 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6332 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6333 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6334 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6335 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6336 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6337/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6338/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6339 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6340 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6341 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6342/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6343/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6344 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6345 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6346 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6347 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6348/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6349/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6350 if (CHIP_REV_IS_FPGA(bp))
6351 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6352 else
6353 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6354 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6355 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6356 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6357/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6358/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6359 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6360 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6361/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6362 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
6363}
6364
6365static const struct {
6366 u32 addr;
6367 u32 mask;
6368} bnx2x_parity_mask[] = {
6369 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6370 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6371 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6372 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6373 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6374 {QM_REG_QM_PRTY_MASK, 0x0},
6375 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6376 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6377 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6378 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6379 {CDU_REG_CDU_PRTY_MASK, 0x0},
6380 {CFC_REG_CFC_PRTY_MASK, 0x0},
6381 {DBG_REG_DBG_PRTY_MASK, 0x0},
6382 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6383 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6384 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6385 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6386 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6387 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6388 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6389 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6390 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6391 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6392 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6393 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6394 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6395 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6396 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6397};
6398
6399static void enable_blocks_parity(struct bnx2x *bp)
6400{
6401	int i;
6402	int mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
6403
6404 for (i = 0; i < mask_arr_len; i++)
6405 REG_WR(bp, bnx2x_parity_mask[i].addr,
6406 bnx2x_parity_mask[i].mask);
6407}
6408
6409
6410static void bnx2x_reset_common(struct bnx2x *bp)
6411{
6412 /* reset_common */
6413 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6414 0xd3ffff7f);
6415 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6416}
6417
6418static void bnx2x_init_pxp(struct bnx2x *bp)
6419{
6420 u16 devctl;
6421 int r_order, w_order;
6422
6423 pci_read_config_word(bp->pdev,
6424 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6425 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6426 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6427 if (bp->mrrs == -1)
6428 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6429 else {
6430 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6431 r_order = bp->mrrs;
6432 }
6433
6434 bnx2x_init_pxp_arb(bp, r_order, w_order);
6435}
6436
6437static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6438{
6439 int is_required;
6440 u32 val;
6441 int port;
6442
6443 if (BP_NOMCP(bp))
6444 return;
6445
6446 is_required = 0;
6447 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6448 SHARED_HW_CFG_FAN_FAILURE_MASK;
6449
6450 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6451 is_required = 1;
6452
6453 /*
6454 * The fan failure mechanism is usually related to the PHY type since
6455 * the power consumption of the board is affected by the PHY. Currently,
6456 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6457 */
6458 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6459 for (port = PORT_0; port < PORT_MAX; port++) {
6460 u32 phy_type =
6461 SHMEM_RD(bp, dev_info.port_hw_config[port].
6462 external_phy_config) &
6463 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6464 is_required |=
6465 ((phy_type ==
6466 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6467 (phy_type ==
6468 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6469 (phy_type ==
6470 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6471 }
6472
6473 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6474
6475 if (is_required == 0)
6476 return;
6477
6478 /* Fan failure is indicated by SPIO 5 */
6479 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6480 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6481
6482 /* set to active low mode */
6483 val = REG_RD(bp, MISC_REG_SPIO_INT);
6484 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6485 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6486 REG_WR(bp, MISC_REG_SPIO_INT, val);
6487
6488 /* enable interrupt to signal the IGU */
6489 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6490 val |= (1 << MISC_REGISTERS_SPIO_5);
6491 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6492}
6493
6494static int bnx2x_init_common(struct bnx2x *bp)
6495{
6496 u32 val, i;
6497#ifdef BCM_CNIC
6498 u32 wb_write[2];
6499#endif
6500
6501 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6502
6503 bnx2x_reset_common(bp);
6504 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6505 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6506
6507 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6508 if (CHIP_IS_E1H(bp))
6509 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6510
6511 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6512 msleep(30);
6513 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6514
6515 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6516 if (CHIP_IS_E1(bp)) {
6517 /* enable HW interrupt from PXP on USDM overflow
6518 bit 16 on INT_MASK_0 */
6519 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6520 }
6521
6522 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6523 bnx2x_init_pxp(bp);
6524
6525#ifdef __BIG_ENDIAN
6526 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6527 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6528 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6529 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6530 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6531 /* make sure this value is 0 */
6532 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6533
6534/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6535 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6536 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6537 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6538 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6539#endif
6540
6541 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6542#ifdef BCM_CNIC
6543 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6544 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6545 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6546#endif
6547
6548 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6549 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6550
6551	/* let the HW do its magic ... */
6552 msleep(100);
6553 /* finish PXP init */
6554 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6555 if (val != 1) {
6556 BNX2X_ERR("PXP2 CFG failed\n");
6557 return -EBUSY;
6558 }
6559 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6560 if (val != 1) {
6561 BNX2X_ERR("PXP2 RD_INIT failed\n");
6562 return -EBUSY;
6563 }
6564
6565 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6566 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6567
6568 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6569
6570 /* clean the DMAE memory */
6571 bp->dmae_ready = 1;
6572 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6573
6574 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6575 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6576 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6577 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6578
6579 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6580 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6581 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6582 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6583
6584 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6585
6586#ifdef BCM_CNIC
6587 wb_write[0] = 0;
6588 wb_write[1] = 0;
6589 for (i = 0; i < 64; i++) {
6590 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6591 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6592
6593 if (CHIP_IS_E1H(bp)) {
6594 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6595 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6596 wb_write, 2);
6597 }
6598 }
6599#endif
6600 /* soft reset pulse */
6601 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6602 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6603
6604#ifdef BCM_CNIC
6605 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6606#endif
6607
6608 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6609 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6610 if (!CHIP_REV_IS_SLOW(bp)) {
6611 /* enable hw interrupt from doorbell Q */
6612 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6613 }
6614
6615 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6616 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6617 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6618#ifndef BCM_CNIC
6619 /* set NIC mode */
6620 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6621#endif
6622 if (CHIP_IS_E1H(bp))
6623 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6624
6625 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6626 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6627 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6628 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6629
6630 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6631 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6632 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6633 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6634
6635 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6636 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6637 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6638 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6639
6640 /* sync semi rtc */
6641 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6642 0x80000000);
6643 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6644 0x80000000);
6645
6646 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6647 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6648 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6649
6650 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6651 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
6652 REG_WR(bp, i, random32());
6653 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6654#ifdef BCM_CNIC
6655 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6656 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6657 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6658 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6659 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6660 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6661 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6662 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6663 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6664 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6665#endif
6666 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6667
6668 if (sizeof(union cdu_context) != 1024)
6669 /* we currently assume that a context is 1024 bytes */
6670 dev_alert(&bp->pdev->dev, "please adjust the size "
6671 "of cdu_context(%ld)\n",
6672 (long)sizeof(union cdu_context));
6673
6674 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6675 val = (4 << 24) + (0 << 12) + 1024;
6676 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6677
6678 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6679 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6680 /* enable context validation interrupt from CFC */
6681 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6682
6683 /* set the thresholds to prevent CFC/CDU race */
6684 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6685
6686 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6687 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6688
6689 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6690 /* Reset PCIE errors for debug */
6691 REG_WR(bp, 0x2814, 0xffffffff);
6692 REG_WR(bp, 0x3820, 0xffffffff);
6693
6694 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6695 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6696 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6697 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6698
6699 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6700 if (CHIP_IS_E1H(bp)) {
6701 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6702 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6703 }
6704
6705 if (CHIP_REV_IS_SLOW(bp))
6706 msleep(200);
6707
6708 /* finish CFC init */
6709 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6710 if (val != 1) {
6711 BNX2X_ERR("CFC LL_INIT failed\n");
6712 return -EBUSY;
6713 }
6714 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6715 if (val != 1) {
6716 BNX2X_ERR("CFC AC_INIT failed\n");
6717 return -EBUSY;
6718 }
6719 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6720 if (val != 1) {
6721 BNX2X_ERR("CFC CAM_INIT failed\n");
6722 return -EBUSY;
6723 }
6724 REG_WR(bp, CFC_REG_DEBUG0, 0);
6725
6726 /* read NIG statistic
6727 to see if this is our first up since powerup */
6728 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6729 val = *bnx2x_sp(bp, wb_data[0]);
6730
6731 /* do internal memory self test */
6732 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6733 BNX2X_ERR("internal mem self test failed\n");
6734 return -EBUSY;
6735 }
6736
6737 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6738 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6739 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6740 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6741 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6742 bp->port.need_hw_lock = 1;
6743 break;
6744
6745 default:
6746 break;
6747 }
6748
6749 bnx2x_setup_fan_failure_detection(bp);
6750
6751 /* clear PXP2 attentions */
6752 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6753
6754 enable_blocks_attention(bp);
6755 if (CHIP_PARITY_SUPPORTED(bp))
6756 enable_blocks_parity(bp);
6757
6758 if (!BP_NOMCP(bp)) {
6759 bnx2x_acquire_phy_lock(bp);
6760 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6761 bnx2x_release_phy_lock(bp);
6762 } else
6763 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6764
6765 return 0;
6766}
6767
6768static int bnx2x_init_port(struct bnx2x *bp)
6769{
6770 int port = BP_PORT(bp);
6771 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6772 u32 low, high;
6773 u32 val;
6774
6775 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
6776
6777 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6778
6779 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6780 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6781
6782 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6783 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6784 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6785 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6786
6787#ifdef BCM_CNIC
6788 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6789
6790 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6791 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6792 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6793#endif
6794
6795 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6796
6797 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6798 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6799 /* no pause for emulation and FPGA */
6800 low = 0;
6801 high = 513;
6802 } else {
6803 if (IS_E1HMF(bp))
6804 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6805 else if (bp->dev->mtu > 4096) {
6806 if (bp->flags & ONE_PORT_FLAG)
6807 low = 160;
6808 else {
6809 val = bp->dev->mtu;
6810 /* (24*1024 + val*4)/256 */
6811 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6812 }
6813 } else
6814 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6815 high = low + 56; /* 14*1024/256 */
6816 }
6817 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6818 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
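	/* Worked example (hypothetical configuration): for a two-port board
	 * in single-function mode with an MTU of 9000, the branch above gives
	 *   low  = 96 + 9000/64 + 1 = 237   (~ (24*1024 + 9000*4)/256)
	 *   high = 237 + 56        = 293    (56 = 14*1024/256)
	 * with the high/low pair presumably acting as the XOFF/XON watermarks
	 * in units of 256-byte BRB blocks.
	 */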
6819
6820
6821 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6822
6823 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6824 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6825 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6826 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6827
6828 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6829 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6830 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6831 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6832
6833 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6834 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6835
6836 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6837
6838 /* configure PBF to work without PAUSE mtu 9000 */
6839 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6840
6841 /* update threshold */
6842 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6843 /* update init credit */
6844 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6845
6846 /* probe changes */
6847 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6848 msleep(5);
6849 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6850
6851#ifdef BCM_CNIC
6852 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6853#endif
6854 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6855 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6856
6857 if (CHIP_IS_E1(bp)) {
6858 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6859 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6860 }
6861 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6862
6863 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6864 /* init aeu_mask_attn_func_0/1:
6865 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6866 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6867 * bits 4-7 are used for "per vn group attention" */
6868 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6869 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6870
6871 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6872 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6873 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6874 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6875 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6876
6877 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6878
6879 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6880
6881 if (CHIP_IS_E1H(bp)) {
6882 /* 0x2 disable e1hov, 0x1 enable */
6883 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6884 (IS_E1HMF(bp) ? 0x1 : 0x2));
6885
6886 {
6887 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6888 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6889 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6890 }
6891 }
6892
6893 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6894 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6895
6896 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6897 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6898 {
6899 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6900
6901 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6902 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6903
6904 /* The GPIO should be swapped if the swap register is
6905 set and active */
6906 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6907 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6908
6909 /* Select function upon port-swap configuration */
6910 if (port == 0) {
6911 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6912 aeu_gpio_mask = (swap_val && swap_override) ?
6913 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6914 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6915 } else {
6916 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6917 aeu_gpio_mask = (swap_val && swap_override) ?
6918 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6919 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6920 }
6921 val = REG_RD(bp, offset);
6922 /* add GPIO3 to group */
6923 val |= aeu_gpio_mask;
6924 REG_WR(bp, offset, val);
6925 }
6926 break;
6927
6928 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6929 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6930 /* add SPIO 5 to group 0 */
6931 {
6932 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6933 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6934 val = REG_RD(bp, reg_addr);
6935 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6936 REG_WR(bp, reg_addr, val);
6937 }
6938 break;
6939
6940 default:
6941 break;
6942 }
6943
6944 bnx2x__link_reset(bp);
6945
6946 return 0;
6947}
6948
6949#define ILT_PER_FUNC (768/2)
6950#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6951/* the phys address is shifted right 12 bits and has a
6952   1=valid bit added at the 53rd bit;
6953 then since this is a wide register(TM)
6954 we split it into two 32 bit writes
6955 */
6956#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6957#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6958#define PXP_ONE_ILT(x) (((x) << 10) | x)
6959#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6960
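/* A minimal worked example, assuming a hypothetical DMA address, of how the
 * two halves of the wide ILT entry are derived by the macros above:
 *
 *   addr            = 0x0000123456789000ULL
 *   addr >> 12      = 0x123456789          (page-resolution part)
 *   ONCHIP_ADDR1()  = 0x23456789           (low 32 bits of the shifted value)
 *   ONCHIP_ADDR2()  = 0x00100001           (bit 20 = valid, rest = addr >> 44)
 *
 * bnx2x_ilt_wr() below then writes the pair back-to-back via bnx2x_wb_wr().
 */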
6961#ifdef BCM_CNIC
6962#define CNIC_ILT_LINES 127
6963#define CNIC_CTX_PER_ILT 16
6964#else
6965#define CNIC_ILT_LINES 0
6966#endif
6967
6968static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6969{
6970 int reg;
6971
6972 if (CHIP_IS_E1H(bp))
6973 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6974 else /* E1 */
6975 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6976
6977 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6978}
6979
6980static int bnx2x_init_func(struct bnx2x *bp)
6981{
6982 int port = BP_PORT(bp);
6983 int func = BP_FUNC(bp);
6984 u32 addr, val;
6985 int i;
6986
6987 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
6988
6989 /* set MSI reconfigure capability */
6990 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6991 val = REG_RD(bp, addr);
6992 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6993 REG_WR(bp, addr, val);
6994
6995 i = FUNC_ILT_BASE(func);
6996
6997 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6998 if (CHIP_IS_E1H(bp)) {
6999 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
7000 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
7001 } else /* E1 */
7002 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
7003 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
7004
7005#ifdef BCM_CNIC
7006 i += 1 + CNIC_ILT_LINES;
7007 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
7008 if (CHIP_IS_E1(bp))
7009 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
7010 else {
7011 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
7012 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
7013 }
7014
7015 i++;
7016 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
7017 if (CHIP_IS_E1(bp))
7018 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7019 else {
7020 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7021 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7022 }
7023
7024 i++;
7025 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7026 if (CHIP_IS_E1(bp))
7027 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7028 else {
7029 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7030 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7031 }
7032
7033 /* tell the searcher where the T2 table is */
7034 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7035
7036 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7037 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7038
7039 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7040 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7041 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7042
7043 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7044#endif
7045
7046 if (CHIP_IS_E1H(bp)) {
7047 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7048 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7049 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7050 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7051 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7052 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7053 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7054 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7055 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
7056
7057 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7058 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7059 }
7060
7061 /* HC init per function */
7062 if (CHIP_IS_E1H(bp)) {
7063 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7064
7065 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7066 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7067 }
7068 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
7069
7070 /* Reset PCIE errors for debug */
7071 REG_WR(bp, 0x2114, 0xffffffff);
7072 REG_WR(bp, 0x2120, 0xffffffff);
7073
7074 return 0;
7075}
7076
7077static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7078{
7079 int i, rc = 0;
7080
7081 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
7082 BP_FUNC(bp), load_code);
7083
7084 bp->dmae_ready = 0;
7085 mutex_init(&bp->dmae_mutex);
7086 rc = bnx2x_gunzip_init(bp);
7087 if (rc)
7088 return rc;
7089
7090 switch (load_code) {
7091 case FW_MSG_CODE_DRV_LOAD_COMMON:
7092 rc = bnx2x_init_common(bp);
7093 if (rc)
7094 goto init_hw_err;
7095 /* no break */
7096
7097 case FW_MSG_CODE_DRV_LOAD_PORT:
7098 bp->dmae_ready = 1;
7099 rc = bnx2x_init_port(bp);
7100 if (rc)
7101 goto init_hw_err;
7102 /* no break */
7103
7104 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7105 bp->dmae_ready = 1;
7106 rc = bnx2x_init_func(bp);
7107 if (rc)
7108 goto init_hw_err;
7109 break;
7110
7111 default:
7112 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7113 break;
7114 }
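	/* Note: the missing breaks above are intentional - the load_code
	 * returned by the MCP selects how much of the chip this instance
	 * must bring up, and each case falls through to the next:
	 *   LOAD_COMMON   -> common + port + function init
	 *   LOAD_PORT     -> port + function init
	 *   LOAD_FUNCTION -> function init only
	 */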
7115
7116 if (!BP_NOMCP(bp)) {
7117 int func = BP_FUNC(bp);
7118
7119 bp->fw_drv_pulse_wr_seq =
7120 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7121 DRV_PULSE_SEQ_MASK);
7122 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7123 }
7124
7125 /* this needs to be done before gunzip end */
7126 bnx2x_zero_def_sb(bp);
7127 for_each_queue(bp, i)
7128 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7129#ifdef BCM_CNIC
7130 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7131#endif
7132
7133init_hw_err:
7134 bnx2x_gunzip_end(bp);
7135
7136 return rc;
7137}
7138
7139static void bnx2x_free_mem(struct bnx2x *bp)
7140{
7141
7142#define BNX2X_PCI_FREE(x, y, size) \
7143 do { \
7144 if (x) { \
7145 dma_free_coherent(&bp->pdev->dev, size, x, y); \
7146 x = NULL; \
7147 y = 0; \
7148 } \
7149 } while (0)
7150
7151#define BNX2X_FREE(x) \
7152 do { \
7153 if (x) { \
7154 vfree(x); \
7155 x = NULL; \
7156 } \
7157 } while (0)
7158
7159 int i;
7160
7161 /* fastpath */
7162 /* Common */
7163 for_each_queue(bp, i) {
7164
7165 /* status blocks */
7166 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7167 bnx2x_fp(bp, i, status_blk_mapping),
7168 sizeof(struct host_status_block));
7169 }
7170 /* Rx */
7171 for_each_queue(bp, i) {
7172
7173 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7174 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7175 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7176 bnx2x_fp(bp, i, rx_desc_mapping),
7177 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7178
7179 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7180 bnx2x_fp(bp, i, rx_comp_mapping),
7181 sizeof(struct eth_fast_path_rx_cqe) *
7182 NUM_RCQ_BD);
7183
7184 /* SGE ring */
7185 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7186 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7187 bnx2x_fp(bp, i, rx_sge_mapping),
7188 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7189 }
7190 /* Tx */
7191 for_each_queue(bp, i) {
7192
7193 /* fastpath tx rings: tx_buf tx_desc */
7194 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7195 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7196 bnx2x_fp(bp, i, tx_desc_mapping),
7197 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7198 }
7199 /* end of fastpath */
7200
7201 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7202 sizeof(struct host_def_status_block));
7203
7204 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7205 sizeof(struct bnx2x_slowpath));
7206
7207#ifdef BCM_CNIC
7208 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7209 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7210 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7211 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
7212 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7213 sizeof(struct host_status_block));
7214#endif
7215 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7216
7217#undef BNX2X_PCI_FREE
7218#undef BNX2X_FREE
7219}
7220
7221static int bnx2x_alloc_mem(struct bnx2x *bp)
7222{
7223
7224#define BNX2X_PCI_ALLOC(x, y, size) \
7225 do { \
7226 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
7227 if (x == NULL) \
7228 goto alloc_mem_err; \
7229 memset(x, 0, size); \
7230 } while (0)
7231
7232#define BNX2X_ALLOC(x, size) \
7233 do { \
7234 x = vmalloc(size); \
7235 if (x == NULL) \
7236 goto alloc_mem_err; \
7237 memset(x, 0, size); \
7238 } while (0)
7239
7240 int i;
7241
7242 /* fastpath */
7243 /* Common */
7244 for_each_queue(bp, i) {
7245 bnx2x_fp(bp, i, bp) = bp;
7246
7247 /* status blocks */
7248 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7249 &bnx2x_fp(bp, i, status_blk_mapping),
7250 sizeof(struct host_status_block));
7251 }
7252 /* Rx */
7253 for_each_queue(bp, i) {
7254
7255 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7256 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7257 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7258 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7259 &bnx2x_fp(bp, i, rx_desc_mapping),
7260 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7261
7262 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7263 &bnx2x_fp(bp, i, rx_comp_mapping),
7264 sizeof(struct eth_fast_path_rx_cqe) *
7265 NUM_RCQ_BD);
7266
7267 /* SGE ring */
7268 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7269 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7270 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7271 &bnx2x_fp(bp, i, rx_sge_mapping),
7272 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7273 }
7274 /* Tx */
7275 for_each_queue(bp, i) {
7276
7277 /* fastpath tx rings: tx_buf tx_desc */
7278 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7279 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7280 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7281 &bnx2x_fp(bp, i, tx_desc_mapping),
7282 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7283 }
7284 /* end of fastpath */
7285
7286 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7287 sizeof(struct host_def_status_block));
7288
7289 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7290 sizeof(struct bnx2x_slowpath));
7291
7292#ifdef BCM_CNIC
7293 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7294
7295 /* allocate searcher T2 table
7296 we allocate 1/4 of alloc num for T2
7297 (which is not entered into the ILT) */
7298 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7299
7300 /* Initialize T2 (for 1024 connections) */
7301 for (i = 0; i < 16*1024; i += 64)
7302 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
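
	/* Each 64-byte T2 entry thus carries, in its last 8 bytes (offset 56),
	 * the physical address of the entry that follows it, turning the 16 KB
	 * region into a singly linked free list for the searcher. The last
	 * entry's pointer lands just past the buffer; presumably it is never
	 * followed, since SRC_REG_LASTFREE0 (written in bnx2x_init_func())
	 * tells the hardware where the list ends.
	 */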
7303
7304 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
7305 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7306
7307 /* QM queues (128*MAX_CONN) */
7308 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
7309
7310 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7311 sizeof(struct host_status_block));
7312#endif
7313
7314 /* Slow path ring */
7315 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7316
7317 return 0;
7318
7319alloc_mem_err:
7320 bnx2x_free_mem(bp);
7321 return -ENOMEM;
7322
7323#undef BNX2X_PCI_ALLOC
7324#undef BNX2X_ALLOC
7325}
7326
7327static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7328{
7329 int i;
7330
7331 for_each_queue(bp, i) {
7332 struct bnx2x_fastpath *fp = &bp->fp[i];
7333
7334 u16 bd_cons = fp->tx_bd_cons;
7335 u16 sw_prod = fp->tx_pkt_prod;
7336 u16 sw_cons = fp->tx_pkt_cons;
7337
7338 while (sw_cons != sw_prod) {
7339 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7340 sw_cons++;
7341 }
7342 }
7343}
7344
7345static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7346{
7347 int i, j;
7348
7349 for_each_queue(bp, j) {
7350 struct bnx2x_fastpath *fp = &bp->fp[j];
7351
7352 for (i = 0; i < NUM_RX_BD; i++) {
7353 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7354 struct sk_buff *skb = rx_buf->skb;
7355
7356 if (skb == NULL)
7357 continue;
7358
7359 dma_unmap_single(&bp->pdev->dev,
7360 dma_unmap_addr(rx_buf, mapping),
7361 bp->rx_buf_size, DMA_FROM_DEVICE);
7362
7363 rx_buf->skb = NULL;
7364 dev_kfree_skb(skb);
7365 }
7366 if (!fp->disable_tpa)
7367 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7368 ETH_MAX_AGGREGATION_QUEUES_E1 :
7369 ETH_MAX_AGGREGATION_QUEUES_E1H);
7370 }
7371}
7372
7373static void bnx2x_free_skbs(struct bnx2x *bp)
7374{
7375 bnx2x_free_tx_skbs(bp);
7376 bnx2x_free_rx_skbs(bp);
7377}
7378
7379static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7380{
7381 int i, offset = 1;
7382
7383 free_irq(bp->msix_table[0].vector, bp->dev);
7384 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7385 bp->msix_table[0].vector);
7386
7387#ifdef BCM_CNIC
7388 offset++;
7389#endif
7390 for_each_queue(bp, i) {
7391 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
7392 "state %x\n", i, bp->msix_table[i + offset].vector,
7393 bnx2x_fp(bp, i, state));
7394
7395 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7396 }
7397}
7398
7399static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7400{
7401 if (bp->flags & USING_MSIX_FLAG) {
7402 if (!disable_only)
7403 bnx2x_free_msix_irqs(bp);
7404 pci_disable_msix(bp->pdev);
7405 bp->flags &= ~USING_MSIX_FLAG;
7406
7407 } else if (bp->flags & USING_MSI_FLAG) {
7408 if (!disable_only)
7409 free_irq(bp->pdev->irq, bp->dev);
7410 pci_disable_msi(bp->pdev);
7411 bp->flags &= ~USING_MSI_FLAG;
7412
7413 } else if (!disable_only)
7414 free_irq(bp->pdev->irq, bp->dev);
7415}
7416
7417static int bnx2x_enable_msix(struct bnx2x *bp)
7418{
7419 int i, rc, offset = 1;
7420 int igu_vec = 0;
7421
7422 bp->msix_table[0].entry = igu_vec;
7423 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7424
7425#ifdef BCM_CNIC
7426 igu_vec = BP_L_ID(bp) + offset;
7427 bp->msix_table[1].entry = igu_vec;
7428 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7429 offset++;
7430#endif
7431 for_each_queue(bp, i) {
7432 igu_vec = BP_L_ID(bp) + offset + i;
7433 bp->msix_table[i + offset].entry = igu_vec;
7434 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7435 "(fastpath #%u)\n", i + offset, igu_vec, i);
7436 }
7437
7438 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7439 BNX2X_NUM_QUEUES(bp) + offset);
7440
7441 /*
7442 * reconfigure number of tx/rx queues according to available
7443 * MSI-X vectors
7444 */
7445 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7446 /* vectors available for FP */
7447 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7448
7449 DP(NETIF_MSG_IFUP,
7450 "Trying to use less MSI-X vectors: %d\n", rc);
7451
7452 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7453
7454 if (rc) {
7455 DP(NETIF_MSG_IFUP,
7456 "MSI-X is not attainable rc %d\n", rc);
7457 return rc;
7458 }
7459
7460 bp->num_queues = min(bp->num_queues, fp_vec);
7461
7462 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7463 bp->num_queues);
7464 } else if (rc) {
7465 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7466 return rc;
7467 }
7468
7469 bp->flags |= USING_MSIX_FLAG;
7470
7471 return 0;
7472}
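
/* A sketch of the resulting MSI-X/IGU vector layout, assuming a hypothetical
 * function with BP_L_ID() == 8, four fastpath queues and BCM_CNIC enabled
 * (so offset ends up as 2):
 *
 *   msix_table[0].entry    = 0        slowpath / default status block
 *   msix_table[1].entry    = 9        CNIC            (BP_L_ID + 1)
 *   msix_table[2..5].entry = 10..13   fastpath 0..3   (BP_L_ID + 2 + i)
 *
 * When pci_enable_msix() grants fewer vectors, the code above retries with
 * the granted count and shrinks bp->num_queues to fit.
 */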
7473
7474static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7475{
7476 int i, rc, offset = 1;
7477
7478 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7479 bp->dev->name, bp->dev);
7480 if (rc) {
7481 BNX2X_ERR("request sp irq failed\n");
7482 return -EBUSY;
7483 }
7484
7485#ifdef BCM_CNIC
7486 offset++;
7487#endif
7488 for_each_queue(bp, i) {
7489 struct bnx2x_fastpath *fp = &bp->fp[i];
7490 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7491 bp->dev->name, i);
7492
7493 rc = request_irq(bp->msix_table[i + offset].vector,
7494 bnx2x_msix_fp_int, 0, fp->name, fp);
7495 if (rc) {
7496 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7497 bnx2x_free_msix_irqs(bp);
7498 return -EBUSY;
7499 }
7500
7501 fp->state = BNX2X_FP_STATE_IRQ;
7502 }
7503
7504 i = BNX2X_NUM_QUEUES(bp);
7505 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7506 " ... fp[%d] %d\n",
7507 bp->msix_table[0].vector,
7508 0, bp->msix_table[offset].vector,
7509 i - 1, bp->msix_table[offset + i - 1].vector);
7510
7511 return 0;
7512}
7513
7514static int bnx2x_enable_msi(struct bnx2x *bp)
7515{
7516 int rc;
7517
7518 rc = pci_enable_msi(bp->pdev);
7519 if (rc) {
7520 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7521 return -1;
7522 }
7523 bp->flags |= USING_MSI_FLAG;
7524
7525 return 0;
7526}
7527
7528static int bnx2x_req_irq(struct bnx2x *bp)
7529{
7530 unsigned long flags;
7531 int rc;
7532
7533 if (bp->flags & USING_MSI_FLAG)
7534 flags = 0;
7535 else
7536 flags = IRQF_SHARED;
7537
7538 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7539 bp->dev->name, bp->dev);
7540 if (!rc)
7541 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7542
7543 return rc;
7544}
7545
7546static void bnx2x_napi_enable(struct bnx2x *bp)
7547{
7548 int i;
7549
7550 for_each_queue(bp, i)
7551 napi_enable(&bnx2x_fp(bp, i, napi));
7552}
7553
7554static void bnx2x_napi_disable(struct bnx2x *bp)
7555{
7556 int i;
7557
7558 for_each_queue(bp, i)
7559 napi_disable(&bnx2x_fp(bp, i, napi));
7560}
7561
7562static void bnx2x_netif_start(struct bnx2x *bp)
7563{
7564 int intr_sem;
7565
7566 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7567 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7568
7569 if (intr_sem) {
7570 if (netif_running(bp->dev)) {
7571 bnx2x_napi_enable(bp);
7572 bnx2x_int_enable(bp);
7573 if (bp->state == BNX2X_STATE_OPEN)
7574 netif_tx_wake_all_queues(bp->dev);
7575 }
7576 }
7577}
7578
7579static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7580{
7581 bnx2x_int_disable_sync(bp, disable_hw);
7582 bnx2x_napi_disable(bp);
7583 netif_tx_disable(bp->dev);
7584}
7585
7586/*
7587 * Init service functions
7588 */
7589
7590/**
7591 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7592 *
7593 * @param bp driver descriptor
7594 * @param set set or clear an entry (1 or 0)
7595 * @param mac pointer to a buffer containing a MAC
7596 * @param cl_bit_vec bit vector of clients to register a MAC for
7597 * @param cam_offset offset in a CAM to use
7598 * @param with_bcast set broadcast MAC as well
7599 */
7600static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7601 u32 cl_bit_vec, u8 cam_offset,
7602 u8 with_bcast)
7603{
7604 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7605 int port = BP_PORT(bp);
7606
7607 /* CAM allocation
7608 * unicasts 0-31:port0 32-63:port1
7609 * multicast 64-127:port0 128-191:port1
7610 */
7611 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7612 config->hdr.offset = cam_offset;
7613 config->hdr.client_id = 0xff;
7614 config->hdr.reserved1 = 0;
7615
7616 /* primary MAC */
7617 config->config_table[0].cam_entry.msb_mac_addr =
7618 swab16(*(u16 *)&mac[0]);
7619 config->config_table[0].cam_entry.middle_mac_addr =
7620 swab16(*(u16 *)&mac[2]);
7621 config->config_table[0].cam_entry.lsb_mac_addr =
7622 swab16(*(u16 *)&mac[4]);
7623 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7624 if (set)
7625 config->config_table[0].target_table_entry.flags = 0;
7626 else
7627 CAM_INVALIDATE(config->config_table[0]);
7628 config->config_table[0].target_table_entry.clients_bit_vector =
7629 cpu_to_le32(cl_bit_vec);
7630 config->config_table[0].target_table_entry.vlan_id = 0;
7631
7632 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7633 (set ? "setting" : "clearing"),
7634 config->config_table[0].cam_entry.msb_mac_addr,
7635 config->config_table[0].cam_entry.middle_mac_addr,
7636 config->config_table[0].cam_entry.lsb_mac_addr);
7637
7638 /* broadcast */
7639 if (with_bcast) {
7640 config->config_table[1].cam_entry.msb_mac_addr =
7641 cpu_to_le16(0xffff);
7642 config->config_table[1].cam_entry.middle_mac_addr =
7643 cpu_to_le16(0xffff);
7644 config->config_table[1].cam_entry.lsb_mac_addr =
7645 cpu_to_le16(0xffff);
7646 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7647 if (set)
7648 config->config_table[1].target_table_entry.flags =
7649 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7650 else
7651 CAM_INVALIDATE(config->config_table[1]);
7652 config->config_table[1].target_table_entry.clients_bit_vector =
7653 cpu_to_le32(cl_bit_vec);
7654 config->config_table[1].target_table_entry.vlan_id = 0;
7655 }
7656
7657 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7658 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7659 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7660}
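
/* With the E1 CAM split described in the comment above, the callers later in
 * this file end up using the following cam_offset values (port 0 / port 1):
 *   primary ETH MAC + broadcast      : 0 / 32
 *   iSCSI L2 MAC (BCM_CNIC builds)   : 2 / 34
 * i.e. (BP_PORT(bp) ? 32 : 0), plus 2 for the iSCSI entry.
 */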
7661
7662/**
7663 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7664 *
7665 * @param bp driver descriptor
7666 * @param set set or clear an entry (1 or 0)
7667 * @param mac pointer to a buffer containing a MAC
7668 * @param cl_bit_vec bit vector of clients to register a MAC for
7669 * @param cam_offset offset in a CAM to use
7670 */
7671static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7672 u32 cl_bit_vec, u8 cam_offset)
7673{
7674 struct mac_configuration_cmd_e1h *config =
7675 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7676
7677 config->hdr.length = 1;
7678 config->hdr.offset = cam_offset;
7679 config->hdr.client_id = 0xff;
7680 config->hdr.reserved1 = 0;
7681
7682 /* primary MAC */
7683 config->config_table[0].msb_mac_addr =
7684 swab16(*(u16 *)&mac[0]);
7685 config->config_table[0].middle_mac_addr =
7686 swab16(*(u16 *)&mac[2]);
7687 config->config_table[0].lsb_mac_addr =
7688 swab16(*(u16 *)&mac[4]);
7689 config->config_table[0].clients_bit_vector =
7690 cpu_to_le32(cl_bit_vec);
7691 config->config_table[0].vlan_id = 0;
7692 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7693 if (set)
7694 config->config_table[0].flags = BP_PORT(bp);
7695 else
7696 config->config_table[0].flags =
7697 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7698
7699 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7700 (set ? "setting" : "clearing"),
7701 config->config_table[0].msb_mac_addr,
7702 config->config_table[0].middle_mac_addr,
7703 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7704
7705 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7706 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7707 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7708}
7709
7710static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7711 int *state_p, int poll)
7712{
7713 /* can take a while if any port is running */
7714 int cnt = 5000;
7715
7716 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7717 poll ? "polling" : "waiting", state, idx);
7718
7719 might_sleep();
7720 while (cnt--) {
7721 if (poll) {
7722 bnx2x_rx_int(bp->fp, 10);
7723 /* if index is different from 0
7724 * the reply for some commands will
7725 * be on the non default queue
7726 */
7727 if (idx)
7728 bnx2x_rx_int(&bp->fp[idx], 10);
7729 }
7730
7731 mb(); /* state is changed by bnx2x_sp_event() */
7732 if (*state_p == state) {
7733#ifdef BNX2X_STOP_ON_ERROR
7734 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7735#endif
7736 return 0;
7737 }
7738
7739 msleep(1);
7740
7741 if (bp->panic)
7742 return -EIO;
7743 }
7744
7745 /* timeout! */
7746 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7747 poll ? "polling" : "waiting", state, idx);
7748#ifdef BNX2X_STOP_ON_ERROR
7749 bnx2x_panic();
7750#endif
7751
7752 return -EBUSY;
7753}
7754
7755static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7756{
7757 bp->set_mac_pending++;
7758 smp_wmb();
7759
7760 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7761 (1 << bp->fp->cl_id), BP_FUNC(bp));
7762
7763 /* Wait for a completion */
7764 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7765}
7766
7767static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7768{
7769 bp->set_mac_pending++;
7770 smp_wmb();
7771
7772 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7773 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7774 1);
7775
7776 /* Wait for a completion */
7777 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7778}
7779
7780#ifdef BCM_CNIC
7781/**
7782 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7783 * MAC(s). This function will wait until the ramrod completion
7784 * returns.
7785 *
7786 * @param bp driver handle
7787 * @param set set or clear the CAM entry
7788 *
7789 * @return 0 on success, -ENODEV if the ramrod doesn't return.
7790 */
7791static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7792{
7793 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7794
7795 bp->set_mac_pending++;
7796 smp_wmb();
7797
7798 /* Send a SET_MAC ramrod */
7799 if (CHIP_IS_E1(bp))
7800 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7801 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7802 1);
7803 else
7804 /* CAM allocation for E1H
7805 * unicasts: by func number
7806 * multicast: 20+FUNC*20, 20 each
7807 */
7808 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7809 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7810
7811 /* Wait for a completion when setting */
7812 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7813
7814 return 0;
7815}
7816#endif
7817
7818static int bnx2x_setup_leading(struct bnx2x *bp)
7819{
7820 int rc;
7821
7822 /* reset IGU state */
7823 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7824
7825 /* SETUP ramrod */
7826 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7827
7828 /* Wait for completion */
7829 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7830
7831 return rc;
7832}
7833
7834static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7835{
7836 struct bnx2x_fastpath *fp = &bp->fp[index];
7837
7838 /* reset IGU state */
7839 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7840
7841 /* SETUP ramrod */
7842 fp->state = BNX2X_FP_STATE_OPENING;
7843 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7844 fp->cl_id, 0);
7845
7846 /* Wait for completion */
7847 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7848 &(fp->state), 0);
7849}
7850
7851static int bnx2x_poll(struct napi_struct *napi, int budget);
7852
7853static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7854{
7855
7856 switch (bp->multi_mode) {
7857 case ETH_RSS_MODE_DISABLED:
7858 bp->num_queues = 1;
7859 break;
7860
7861 case ETH_RSS_MODE_REGULAR:
7862 if (num_queues)
7863 bp->num_queues = min_t(u32, num_queues,
7864 BNX2X_MAX_QUEUES(bp));
7865 else
7866 bp->num_queues = min_t(u32, num_online_cpus(),
7867 BNX2X_MAX_QUEUES(bp));
7868 break;
7869
7870
7871 default:
7872 bp->num_queues = 1;
7873 break;
7874 }
7875}
7876
7877static int bnx2x_set_num_queues(struct bnx2x *bp)
7878{
7879 int rc = 0;
7880
7881 switch (int_mode) {
7882 case INT_MODE_INTx:
7883 case INT_MODE_MSI:
7884 bp->num_queues = 1;
7885 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7886 break;
7887 default:
7888 /* Set number of queues according to bp->multi_mode value */
7889 bnx2x_set_num_queues_msix(bp);
7890
7891 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7892 bp->num_queues);
7893
7894 /* if we can't use MSI-X we only need one fp,
7895 * so try to enable MSI-X with the requested number of fp's
7896 * and fallback to MSI or legacy INTx with one fp
7897 */
7898 rc = bnx2x_enable_msix(bp);
7899 if (rc)
7900 /* failed to enable MSI-X */
7901 bp->num_queues = 1;
7902 break;
7903 }
7904 bp->dev->real_num_tx_queues = bp->num_queues;
7905 return rc;
7906}
7907
7908#ifdef BCM_CNIC
7909static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7910static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7911#endif
7912
7913/* must be called with rtnl_lock */
7914static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7915{
7916 u32 load_code;
7917 int i, rc;
7918
7919#ifdef BNX2X_STOP_ON_ERROR
7920 if (unlikely(bp->panic))
7921 return -EPERM;
7922#endif
7923
7924 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7925
7926 rc = bnx2x_set_num_queues(bp);
7927
7928 if (bnx2x_alloc_mem(bp)) {
7929 bnx2x_free_irq(bp, true);
7930 return -ENOMEM;
7931 }
7932
7933 for_each_queue(bp, i)
7934 bnx2x_fp(bp, i, disable_tpa) =
7935 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7936
7937 for_each_queue(bp, i)
7938 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7939 bnx2x_poll, 128);
7940
7941 bnx2x_napi_enable(bp);
7942
7943 if (bp->flags & USING_MSIX_FLAG) {
7944 rc = bnx2x_req_msix_irqs(bp);
7945 if (rc) {
7946 bnx2x_free_irq(bp, true);
7947 goto load_error1;
7948 }
7949 } else {
7950 /* Fall to INTx if failed to enable MSI-X due to lack of
7951 memory (in bnx2x_set_num_queues()) */
7952 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7953 bnx2x_enable_msi(bp);
7954 bnx2x_ack_int(bp);
7955 rc = bnx2x_req_irq(bp);
7956 if (rc) {
7957 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7958 bnx2x_free_irq(bp, true);
7959 goto load_error1;
7960 }
7961 if (bp->flags & USING_MSI_FLAG) {
7962 bp->dev->irq = bp->pdev->irq;
7963 netdev_info(bp->dev, "using MSI IRQ %d\n",
7964 bp->pdev->irq);
7965 }
7966 }
7967
7968 /* Send LOAD_REQUEST command to MCP
7969 Returns the type of LOAD command:
7970 if it is the first port to be initialized
7971 common blocks should be initialized, otherwise - not
7972 */
7973 if (!BP_NOMCP(bp)) {
7974 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7975 if (!load_code) {
7976 BNX2X_ERR("MCP response failure, aborting\n");
7977 rc = -EBUSY;
7978 goto load_error2;
7979 }
7980 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7981 rc = -EBUSY; /* other port in diagnostic mode */
7982 goto load_error2;
7983 }
7984
7985 } else {
7986 int port = BP_PORT(bp);
7987
7988 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7989 load_count[0], load_count[1], load_count[2]);
7990 load_count[0]++;
7991 load_count[1 + port]++;
7992 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7993 load_count[0], load_count[1], load_count[2]);
7994 if (load_count[0] == 1)
7995 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7996 else if (load_count[1 + port] == 1)
7997 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7998 else
7999 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
8000 }
8001
8002 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
8003 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
8004 bp->port.pmf = 1;
8005 else
8006 bp->port.pmf = 0;
8007 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
8008
8009 /* Initialize HW */
8010 rc = bnx2x_init_hw(bp, load_code);
8011 if (rc) {
8012 BNX2X_ERR("HW init failed, aborting\n");
8013 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8014 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8015 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8016 goto load_error2;
8017 }
8018
8019 /* Setup NIC internals and enable interrupts */
8020 bnx2x_nic_init(bp, load_code);
8021
8022 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8023 (bp->common.shmem2_base))
8024 SHMEM2_WR(bp, dcc_support,
8025 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8026 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8027
8028 /* Send LOAD_DONE command to MCP */
8029 if (!BP_NOMCP(bp)) {
8030 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8031 if (!load_code) {
8032 BNX2X_ERR("MCP response failure, aborting\n");
8033 rc = -EBUSY;
8034 goto load_error3;
8035 }
8036 }
8037
8038 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8039
8040 rc = bnx2x_setup_leading(bp);
8041 if (rc) {
8042 BNX2X_ERR("Setup leading failed!\n");
8043#ifndef BNX2X_STOP_ON_ERROR
8044 goto load_error3;
8045#else
8046 bp->panic = 1;
8047 return -EBUSY;
8048#endif
8049 }
8050
8051 if (CHIP_IS_E1H(bp))
8052 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8053 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8054 bp->flags |= MF_FUNC_DIS;
8055 }
8056
8057 if (bp->state == BNX2X_STATE_OPEN) {
8058#ifdef BCM_CNIC
8059 /* Enable Timer scan */
8060 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8061#endif
8062 for_each_nondefault_queue(bp, i) {
8063 rc = bnx2x_setup_multi(bp, i);
8064 if (rc)
8065#ifdef BCM_CNIC
8066 goto load_error4;
8067#else
8068 goto load_error3;
8069#endif
8070 }
8071
8072 if (CHIP_IS_E1(bp))
8073 bnx2x_set_eth_mac_addr_e1(bp, 1);
8074 else
8075 bnx2x_set_eth_mac_addr_e1h(bp, 1);
8076#ifdef BCM_CNIC
8077 /* Set iSCSI L2 MAC */
8078 mutex_lock(&bp->cnic_mutex);
8079 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8080 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8081 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8082 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8083 CNIC_SB_ID(bp));
8084 }
8085 mutex_unlock(&bp->cnic_mutex);
8086#endif
8087 }
8088
8089 if (bp->port.pmf)
8090 bnx2x_initial_phy_init(bp, load_mode);
8091
8092 /* Start fast path */
8093 switch (load_mode) {
8094 case LOAD_NORMAL:
8095 if (bp->state == BNX2X_STATE_OPEN) {
8096			/* Tx queues should only be re-enabled */
8097 netif_tx_wake_all_queues(bp->dev);
8098 }
8099 /* Initialize the receive filter. */
8100 bnx2x_set_rx_mode(bp->dev);
8101 break;
8102
8103 case LOAD_OPEN:
8104 netif_tx_start_all_queues(bp->dev);
8105 if (bp->state != BNX2X_STATE_OPEN)
8106 netif_tx_disable(bp->dev);
8107 /* Initialize the receive filter. */
8108 bnx2x_set_rx_mode(bp->dev);
8109 break;
8110
8111 case LOAD_DIAG:
8112 /* Initialize the receive filter. */
8113 bnx2x_set_rx_mode(bp->dev);
8114 bp->state = BNX2X_STATE_DIAG;
8115 break;
8116
8117 default:
8118 break;
8119 }
8120
8121 if (!bp->port.pmf)
8122 bnx2x__link_status_update(bp);
8123
8124 /* start the timer */
8125 mod_timer(&bp->timer, jiffies + bp->current_interval);
8126
8127#ifdef BCM_CNIC
8128 bnx2x_setup_cnic_irq_info(bp);
8129 if (bp->state == BNX2X_STATE_OPEN)
8130 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8131#endif
8132 bnx2x_inc_load_cnt(bp);
8133
8134 return 0;
8135
8136#ifdef BCM_CNIC
8137load_error4:
8138 /* Disable Timer scan */
8139 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8140#endif
8141load_error3:
8142 bnx2x_int_disable_sync(bp, 1);
8143 if (!BP_NOMCP(bp)) {
8144 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8145 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8146 }
8147 bp->port.pmf = 0;
8148 /* Free SKBs, SGEs, TPA pool and driver internals */
8149 bnx2x_free_skbs(bp);
8150 for_each_queue(bp, i)
8151 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8152load_error2:
8153 /* Release IRQs */
8154 bnx2x_free_irq(bp, false);
8155load_error1:
8156 bnx2x_napi_disable(bp);
8157 for_each_queue(bp, i)
8158 netif_napi_del(&bnx2x_fp(bp, i, napi));
8159 bnx2x_free_mem(bp);
8160
8161 return rc;
8162}
8163
8164static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8165{
8166 struct bnx2x_fastpath *fp = &bp->fp[index];
8167 int rc;
8168
8169 /* halt the connection */
8170 fp->state = BNX2X_FP_STATE_HALTING;
8171 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
8172
8173 /* Wait for completion */
8174 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
8175 &(fp->state), 1);
8176 if (rc) /* timeout */
8177 return rc;
8178
8179 /* delete cfc entry */
8180 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8181
8182 /* Wait for completion */
8183 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
8184 &(fp->state), 1);
8185 return rc;
8186}
8187
8188static int bnx2x_stop_leading(struct bnx2x *bp)
8189{
8190 __le16 dsb_sp_prod_idx;
8191 /* if the other port is handling traffic,
8192 this can take a lot of time */
8193 int cnt = 500;
8194 int rc;
8195
8196 might_sleep();
8197
8198 /* Send HALT ramrod */
8199 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
8200 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
8201
8202 /* Wait for completion */
8203 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8204 &(bp->fp[0].state), 1);
8205 if (rc) /* timeout */
8206 return rc;
8207
8208 dsb_sp_prod_idx = *bp->dsb_sp_prod;
8209
8210 /* Send PORT_DELETE ramrod */
8211 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8212
8213	/* Wait for completion to arrive on the default status block;
8214	   we are going to reset the chip anyway,
8215	   so there is not much to do if this times out
8216 */
8217 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
8218 if (!cnt) {
8219 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8220 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8221 *bp->dsb_sp_prod, dsb_sp_prod_idx);
8222#ifdef BNX2X_STOP_ON_ERROR
8223 bnx2x_panic();
8224#endif
8225 rc = -EBUSY;
8226 break;
8227 }
8228 cnt--;
8229 msleep(1);
8230 rmb(); /* Refresh the dsb_sp_prod */
8231 }
8232 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8233 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
8234
8235 return rc;
8236}
8237
8238static void bnx2x_reset_func(struct bnx2x *bp)
8239{
8240 int port = BP_PORT(bp);
8241 int func = BP_FUNC(bp);
8242 int base, i;
8243
8244 /* Configure IGU */
8245 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8246 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8247
8248#ifdef BCM_CNIC
8249 /* Disable Timer scan */
8250 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8251 /*
8252	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
8253 * complete
8254 */
8255 for (i = 0; i < 200; i++) {
8256 msleep(10);
8257 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8258 break;
8259 }
8260#endif
8261 /* Clear ILT */
8262 base = FUNC_ILT_BASE(func);
8263 for (i = base; i < base + ILT_PER_FUNC; i++)
8264 bnx2x_ilt_wr(bp, i, 0);
8265}
8266
8267static void bnx2x_reset_port(struct bnx2x *bp)
8268{
8269 int port = BP_PORT(bp);
8270 u32 val;
8271
8272 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8273
8274 /* Do not rcv packets to BRB */
8275 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8276 /* Do not direct rcv packets that are not for MCP to the BRB */
8277 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8278 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8279
8280 /* Configure AEU */
8281 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8282
8283 msleep(100);
8284 /* Check for BRB port occupancy */
8285 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8286 if (val)
8287 DP(NETIF_MSG_IFDOWN,
8288 "BRB1 is not empty %d blocks are occupied\n", val);
8289
8290 /* TODO: Close Doorbell port? */
8291}
8292
8293static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8294{
8295 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
8296 BP_FUNC(bp), reset_code);
8297
8298 switch (reset_code) {
8299 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8300 bnx2x_reset_port(bp);
8301 bnx2x_reset_func(bp);
8302 bnx2x_reset_common(bp);
8303 break;
8304
8305 case FW_MSG_CODE_DRV_UNLOAD_PORT:
8306 bnx2x_reset_port(bp);
8307 bnx2x_reset_func(bp);
8308 break;
8309
8310 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8311 bnx2x_reset_func(bp);
8312 break;
8313
8314 default:
8315 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8316 break;
8317 }
8318}
8319
8320static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8321{
8322 int port = BP_PORT(bp);
8323 u32 reset_code = 0;
8324 int i, cnt, rc;
8325
8326 /* Wait until tx fastpath tasks complete */
8327 for_each_queue(bp, i) {
8328 struct bnx2x_fastpath *fp = &bp->fp[i];
8329
8330 cnt = 1000;
8331 while (bnx2x_has_tx_work_unload(fp)) {
8332
8333 bnx2x_tx_int(fp);
8334 if (!cnt) {
8335 BNX2X_ERR("timeout waiting for queue[%d]\n",
8336 i);
8337#ifdef BNX2X_STOP_ON_ERROR
8338 bnx2x_panic();
8339 return -EBUSY;
8340#else
8341 break;
8342#endif
8343 }
8344 cnt--;
8345 msleep(1);
8346 }
8347 }
8348 /* Give HW time to discard old tx messages */
8349 msleep(1);
8350
8351 if (CHIP_IS_E1(bp)) {
8352 struct mac_configuration_cmd *config =
8353 bnx2x_sp(bp, mcast_config);
8354
8355 bnx2x_set_eth_mac_addr_e1(bp, 0);
8356
8357 for (i = 0; i < config->hdr.length; i++)
8358 CAM_INVALIDATE(config->config_table[i]);
8359
8360 config->hdr.length = i;
8361 if (CHIP_REV_IS_SLOW(bp))
8362 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8363 else
8364 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
8365 config->hdr.client_id = bp->fp->cl_id;
8366 config->hdr.reserved1 = 0;
8367
8368 bp->set_mac_pending++;
8369 smp_wmb();
8370
8371 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8372 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8373 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8374
8375 } else { /* E1H */
8376 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8377
8378 bnx2x_set_eth_mac_addr_e1h(bp, 0);
8379
8380 for (i = 0; i < MC_HASH_SIZE; i++)
8381 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
8382
8383 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
8384 }
8385#ifdef BCM_CNIC
8386 /* Clear iSCSI L2 MAC */
8387 mutex_lock(&bp->cnic_mutex);
8388 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8389 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8390 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8391 }
8392 mutex_unlock(&bp->cnic_mutex);
8393#endif
8394
8395 if (unload_mode == UNLOAD_NORMAL)
8396 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8397
8398 else if (bp->flags & NO_WOL_FLAG)
8399 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8400
8401 else if (bp->wol) {
8402 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8403 u8 *mac_addr = bp->dev->dev_addr;
8404 u32 val;
8405		/* The MAC address is written to entries 1-4 to
8406		   preserve entry 0, which is used by the PMF */
8407 u8 entry = (BP_E1HVN(bp) + 1)*8;
8408
8409 val = (mac_addr[0] << 8) | mac_addr[1];
8410 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8411
8412 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8413 (mac_addr[4] << 8) | mac_addr[5];
8414 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8415
8416 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8417
8418 } else
8419 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8420
8421 /* Close multi and leading connections
8422 Completions for ramrods are collected in a synchronous way */
8423 for_each_nondefault_queue(bp, i)
8424 if (bnx2x_stop_multi(bp, i))
8425 goto unload_error;
8426
8427 rc = bnx2x_stop_leading(bp);
8428 if (rc) {
8429 BNX2X_ERR("Stop leading failed!\n");
8430#ifdef BNX2X_STOP_ON_ERROR
8431 return -EBUSY;
8432#else
8433 goto unload_error;
8434#endif
8435 }
8436
8437unload_error:
8438 if (!BP_NOMCP(bp))
8439 reset_code = bnx2x_fw_command(bp, reset_code);
8440 else {
8441 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
8442 load_count[0], load_count[1], load_count[2]);
8443 load_count[0]--;
8444 load_count[1 + port]--;
8445 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
8446 load_count[0], load_count[1], load_count[2]);
8447 if (load_count[0] == 0)
8448 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8449 else if (load_count[1 + port] == 0)
8450 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8451 else
8452 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8453 }
8454
8455 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8456 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8457 bnx2x__link_reset(bp);
8458
8459 /* Reset the chip */
8460 bnx2x_reset_chip(bp, reset_code);
8461
8462 /* Report UNLOAD_DONE to MCP */
8463 if (!BP_NOMCP(bp))
8464 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8465
8466}
8467
8468static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8469{
8470 u32 val;
8471
8472 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8473
8474 if (CHIP_IS_E1(bp)) {
8475 int port = BP_PORT(bp);
8476 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8477 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8478
8479 val = REG_RD(bp, addr);
8480 val &= ~(0x300);
8481 REG_WR(bp, addr, val);
8482 } else if (CHIP_IS_E1H(bp)) {
8483 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8484 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8485 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8486 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8487 }
8488}
8489
8490/* must be called with rtnl_lock */
8491static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8492{
8493 int i;
8494
8495 if (bp->state == BNX2X_STATE_CLOSED) {
8496 /* Interface has been removed - nothing to recover */
8497 bp->recovery_state = BNX2X_RECOVERY_DONE;
8498 bp->is_leader = 0;
8499 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8500 smp_wmb();
8501
8502 return -EINVAL;
8503 }
8504
8505#ifdef BCM_CNIC
8506 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8507#endif
8508 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8509
8510 /* Set "drop all" */
8511 bp->rx_mode = BNX2X_RX_MODE_NONE;
8512 bnx2x_set_storm_rx_mode(bp);
8513
8514 /* Disable HW interrupts, NAPI and Tx */
8515 bnx2x_netif_stop(bp, 1);
8516 netif_carrier_off(bp->dev);
8517
8518 del_timer_sync(&bp->timer);
8519 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8520 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8521 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8522
8523 /* Release IRQs */
8524 bnx2x_free_irq(bp, false);
8525
8526 /* Cleanup the chip if needed */
8527 if (unload_mode != UNLOAD_RECOVERY)
8528 bnx2x_chip_cleanup(bp, unload_mode);
8529
8530 bp->port.pmf = 0;
8531
8532 /* Free SKBs, SGEs, TPA pool and driver internals */
8533 bnx2x_free_skbs(bp);
8534 for_each_queue(bp, i)
8535 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8536 for_each_queue(bp, i)
8537 netif_napi_del(&bnx2x_fp(bp, i, napi));
8538 bnx2x_free_mem(bp);
8539
8540 bp->state = BNX2X_STATE_CLOSED;
8541
8542	/* The last driver must disable the "close the gates" feature if there is no
8543 * parity attention or "process kill" pending.
8544 */
8545 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8546 bnx2x_reset_is_done(bp))
8547 bnx2x_disable_close_the_gate(bp);
8548
8549	/* Reset the MCP mailbox sequence if there is an ongoing recovery */
8550 if (unload_mode == UNLOAD_RECOVERY)
8551 bp->fw_seq = 0;
8552
8553 return 0;
8554}
8555
8556/* Close gates #2, #3 and #4: */
8557static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8558{
8559 u32 val, addr;
8560
8561 /* Gates #2 and #4a are closed/opened for "not E1" only */
8562 if (!CHIP_IS_E1(bp)) {
8563 /* #4 */
8564 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8565 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8566 close ? (val | 0x1) : (val & (~(u32)1)));
8567 /* #2 */
8568 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8569 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8570 close ? (val | 0x1) : (val & (~(u32)1)));
8571 }
8572
8573 /* #3 */
8574 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8575 val = REG_RD(bp, addr);
8576 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8577
8578 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8579 close ? "closing" : "opening");
8580 mmiowb();
8581}
8582
8583#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
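/* Setting this bit in shared_mf_config.clp_mb before an MCP reset and
 * restoring it afterwards preserves the MF configuration across the
 * reset (see bnx2x_clp_reset_prep()/bnx2x_clp_reset_done() below).
 */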
8584
8585static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8586{
8587 /* Do some magic... */
8588 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8589 *magic_val = val & SHARED_MF_CLP_MAGIC;
8590 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8591}
8592
8593/* Restore the value of the `magic' bit.
8594 *
8595 * @param bp		Driver handle.
8596 * @param magic_val Old value of the `magic' bit.
8597 */
8598static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8599{
8600 /* Restore the `magic' bit value... */
8601 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8602 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8603 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8604 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8605 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8606 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8607}
8608
8609/* Prepares for MCP reset: takes care of CLP configurations.
8610 *
8611 * @param bp
8612 * @param magic_val Old value of 'magic' bit.
8613 */
8614static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8615{
8616 u32 shmem;
8617 u32 validity_offset;
8618
8619 DP(NETIF_MSG_HW, "Starting\n");
8620
8621 /* Set `magic' bit in order to save MF config */
8622 if (!CHIP_IS_E1(bp))
8623 bnx2x_clp_reset_prep(bp, magic_val);
8624
8625 /* Get shmem offset */
8626 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8627 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8628
8629 /* Clear validity map flags */
8630 if (shmem > 0)
8631 REG_WR(bp, shmem + validity_offset, 0);
8632}
8633
8634#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8635#define MCP_ONE_TIMEOUT 100 /* 100 ms */
8636
8637/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8638 * depending on the HW type.
8639 *
8640 * @param bp
8641 */
8642static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8643{
8644 /* special handling for emulation and FPGA,
8645 wait 10 times longer */
8646 if (CHIP_REV_IS_SLOW(bp))
8647 msleep(MCP_ONE_TIMEOUT*10);
8648 else
8649 msleep(MCP_ONE_TIMEOUT);
8650}
8651
8652static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8653{
8654 u32 shmem, cnt, validity_offset, val;
8655 int rc = 0;
8656
8657 msleep(100);
8658
8659 /* Get shmem offset */
8660 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8661 if (shmem == 0) {
8662 BNX2X_ERR("Shmem 0 return failure\n");
8663 rc = -ENOTTY;
8664 goto exit_lbl;
8665 }
8666
8667 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8668
8669 /* Wait for MCP to come up */
8670 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8671		/* TBD: it's best to check the validity map of the last port;
8672		 * currently this checks port 0.
8673 */
8674 val = REG_RD(bp, shmem + validity_offset);
8675 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8676 shmem + validity_offset, val);
8677
8678 /* check that shared memory is valid. */
8679 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8680 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8681 break;
8682
8683 bnx2x_mcp_wait_one(bp);
8684 }
8685
8686 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8687
8688 /* Check that shared memory is valid. This indicates that MCP is up. */
8689 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8690 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8691 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8692 rc = -ENOTTY;
8693 goto exit_lbl;
8694 }
8695
8696exit_lbl:
8697 /* Restore the `magic' bit value */
8698 if (!CHIP_IS_E1(bp))
8699 bnx2x_clp_reset_done(bp, magic_val);
8700
8701 return rc;
8702}
8703
8704static void bnx2x_pxp_prep(struct bnx2x *bp)
8705{
8706 if (!CHIP_IS_E1(bp)) {
8707 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8708 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8709 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8710 mmiowb();
8711 }
8712}
8713
8714/*
8715 * Reset the whole chip except for:
8716 * - PCIE core
8717 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8718 * one reset bit)
8719 * - IGU
8720 * - MISC (including AEU)
8721 * - GRC
8722 * - RBCN, RBCP
8723 */
8724static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8725{
8726 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8727
8728 not_reset_mask1 =
8729 MISC_REGISTERS_RESET_REG_1_RST_HC |
8730 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8731 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8732
8733 not_reset_mask2 =
8734 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8735 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8736 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8737 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8738 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8739 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8740 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8741 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8742
8743 reset_mask1 = 0xffffffff;
8744
8745 if (CHIP_IS_E1(bp))
8746 reset_mask2 = 0xffff;
8747 else
8748 reset_mask2 = 0x1ffff;
8749
8750 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8751 reset_mask1 & (~not_reset_mask1));
8752 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8753 reset_mask2 & (~not_reset_mask2));
8754
8755 barrier();
8756 mmiowb();
8757
8758 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8759 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8760 mmiowb();
8761}
8762
8763static int bnx2x_process_kill(struct bnx2x *bp)
8764{
8765 int cnt = 1000;
8766 u32 val = 0;
8767 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8768
8769
8770 /* Empty the Tetris buffer, wait for 1s */
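	/* The loop below polls the PXP2 read-client counters and the
	 * expansion ROM register until they report their idle values,
	 * i.e. until no outstanding GRC read requests remain.
	 */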
8771 do {
8772 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8773 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8774 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8775 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8776 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8777 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8778 ((port_is_idle_0 & 0x1) == 0x1) &&
8779 ((port_is_idle_1 & 0x1) == 0x1) &&
8780 (pgl_exp_rom2 == 0xffffffff))
8781 break;
8782 msleep(1);
8783 } while (cnt-- > 0);
8784
8785 if (cnt <= 0) {
8786 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8787 " are still"
8788 " outstanding read requests after 1s!\n");
8789 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8790 " port_is_idle_0=0x%08x,"
8791 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8792 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8793 pgl_exp_rom2);
8794 return -EAGAIN;
8795 }
8796
8797 barrier();
8798
8799 /* Close gates #2, #3 and #4 */
8800 bnx2x_set_234_gates(bp, true);
8801
8802 /* TBD: Indicate that "process kill" is in progress to MCP */
8803
8804 /* Clear "unprepared" bit */
8805 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8806 barrier();
8807
8808 /* Make sure all is written to the chip before the reset */
8809 mmiowb();
8810
8811 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8812 * PSWHST, GRC and PSWRD Tetris buffer.
8813 */
8814 msleep(1);
8815
8816	/* Prepare for chip reset: */
8817 /* MCP */
8818 bnx2x_reset_mcp_prep(bp, &val);
8819
8820 /* PXP */
8821 bnx2x_pxp_prep(bp);
8822 barrier();
8823
8824 /* reset the chip */
8825 bnx2x_process_kill_chip_reset(bp);
8826 barrier();
8827
8828 /* Recover after reset: */
8829 /* MCP */
8830 if (bnx2x_reset_mcp_comp(bp, val))
8831 return -EAGAIN;
8832
8833 /* PXP */
8834 bnx2x_pxp_prep(bp);
8835
8836 /* Open the gates #2, #3 and #4 */
8837 bnx2x_set_234_gates(bp, false);
8838
8839	/* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
8840	 * reset state, re-enable attentions. */
8841
8842 return 0;
8843}
8844
8845static int bnx2x_leader_reset(struct bnx2x *bp)
8846{
8847 int rc = 0;
8848 /* Try to recover after the failure */
8849 if (bnx2x_process_kill(bp)) {
8850		printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
8851 bp->dev->name);
8852 rc = -EAGAIN;
8853 goto exit_leader_reset;
8854 }
8855
8856 /* Clear "reset is in progress" bit and update the driver state */
8857 bnx2x_set_reset_done(bp);
8858 bp->recovery_state = BNX2X_RECOVERY_DONE;
8859
8860exit_leader_reset:
8861 bp->is_leader = 0;
8862 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8863 smp_wmb();
8864 return rc;
8865}
8866
8867static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8868
8869 /* Assumption: runs under rtnl lock. This, together with the fact
8870  * that it's called only from bnx2x_reset_task(), ensures that it
8871 * will never be called when netif_running(bp->dev) is false.
8872 */
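/* Recovery flow in short: the first function to take the LEADER_LOCK HW
 * lock becomes the leader; it waits until all other functions have
 * unloaded, performs the "process kill" chip reset and then reloads.
 * Non-leaders wait for the reset to complete and simply reload.
 */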
8873static void bnx2x_parity_recover(struct bnx2x *bp)
8874{
8875 DP(NETIF_MSG_HW, "Handling parity\n");
8876 while (1) {
8877 switch (bp->recovery_state) {
8878 case BNX2X_RECOVERY_INIT:
8879 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8880 /* Try to get a LEADER_LOCK HW lock */
8881 if (bnx2x_trylock_hw_lock(bp,
8882 HW_LOCK_RESOURCE_RESERVED_08))
8883 bp->is_leader = 1;
8884
8885 /* Stop the driver */
8886 /* If interface has been removed - break */
8887 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8888 return;
8889
8890 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8891 /* Ensure "is_leader" and "recovery_state"
8892 * update values are seen on other CPUs
8893 */
8894 smp_wmb();
8895 break;
8896
8897 case BNX2X_RECOVERY_WAIT:
8898 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8899 if (bp->is_leader) {
8900 u32 load_counter = bnx2x_get_load_cnt(bp);
8901 if (load_counter) {
8902				/* Wait until all other functions
8903				 * have gone down.
8904 */
8905 schedule_delayed_work(&bp->reset_task,
8906 HZ/10);
8907 return;
8908 } else {
8909				/* If all other functions are down,
8910 * try to bring the chip back to
8911 * normal. In any case it's an exit
8912 * point for a leader.
8913 */
8914 if (bnx2x_leader_reset(bp) ||
8915 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8916 printk(KERN_ERR"%s: Recovery "
8917 "has failed. Power cycle is "
8918 "needed.\n", bp->dev->name);
8919 /* Disconnect this device */
8920 netif_device_detach(bp->dev);
8921 /* Block ifup for all function
8922 * of this ASIC until
8923 * "process kill" or power
8924 * cycle.
8925 */
8926 bnx2x_set_reset_in_progress(bp);
8927 /* Shut down the power */
8928 bnx2x_set_power_state(bp,
8929 PCI_D3hot);
8930 return;
8931 }
8932
8933 return;
8934 }
8935 } else { /* non-leader */
8936 if (!bnx2x_reset_is_done(bp)) {
8937				/* Try to get the LEADER_LOCK HW lock,
8938				 * since a former leader may have
8939				 * been unloaded by the user or may
8940				 * have released the leadership for
8941				 * some other reason.
8942 */
8943 if (bnx2x_trylock_hw_lock(bp,
8944 HW_LOCK_RESOURCE_RESERVED_08)) {
8945 /* I'm a leader now! Restart a
8946 * switch case.
8947 */
8948 bp->is_leader = 1;
8949 break;
8950 }
8951
8952 schedule_delayed_work(&bp->reset_task,
8953 HZ/10);
8954 return;
8955
8956 } else { /* A leader has completed
8957 * the "process kill". It's an exit
8958 * point for a non-leader.
8959 */
8960 bnx2x_nic_load(bp, LOAD_NORMAL);
8961 bp->recovery_state =
8962 BNX2X_RECOVERY_DONE;
8963 smp_wmb();
8964 return;
8965 }
8966 }
8967 default:
8968 return;
8969 }
8970 }
8971}
8972
8973 /* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
8974  * scheduled on a generic workqueue in order to prevent a deadlock.
8975 */
8976static void bnx2x_reset_task(struct work_struct *work)
8977{
8978 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8979
8980#ifdef BNX2X_STOP_ON_ERROR
8981 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8982 " so reset not done to allow debug dump,\n"
8983 KERN_ERR " you will need to reboot when done\n");
8984 return;
8985#endif
8986
8987 rtnl_lock();
8988
8989 if (!netif_running(bp->dev))
8990 goto reset_task_exit;
8991
8992 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8993 bnx2x_parity_recover(bp);
8994 else {
8995 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8996 bnx2x_nic_load(bp, LOAD_NORMAL);
8997 }
8998
8999reset_task_exit:
9000 rtnl_unlock();
9001}
9002
9003/* end of nic load/unload */
9004
9005/* ethtool_ops */
9006
9007/*
9008 * Init service functions
9009 */
9010
9011static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
9012{
9013 switch (func) {
9014 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
9015 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
9016 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
9017 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9018 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9019 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9020 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9021 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9022 default:
9023 BNX2X_ERR("Unsupported function index: %d\n", func);
9024 return (u32)(-1);
9025 }
9026}
9027
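/* Writing a function number into the PGL "pretend" register makes the
 * chip treat subsequent GRC accesses as if they came from that
 * function; it is used here to disable interrupts while acting as
 * function 0 and then to restore the original function.
 */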
9028static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9029{
9030 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9031
9032 /* Flush all outstanding writes */
9033 mmiowb();
9034
9035 /* Pretend to be function 0 */
9036 REG_WR(bp, reg, 0);
9037 /* Flush the GRC transaction (in the chip) */
9038 new_val = REG_RD(bp, reg);
9039 if (new_val != 0) {
9040 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9041 new_val);
9042 BUG();
9043 }
9044
9045	/* From now on we are in "like-E1" mode */
9046 bnx2x_int_disable(bp);
9047
9048 /* Flush all outstanding writes */
9049 mmiowb();
9050
9051	/* Restore the original function settings */
9052 REG_WR(bp, reg, orig_func);
9053 new_val = REG_RD(bp, reg);
9054 if (new_val != orig_func) {
9055 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9056 orig_func, new_val);
9057 BUG();
9058 }
9059}
9060
9061static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9062{
9063 if (CHIP_IS_E1H(bp))
9064 bnx2x_undi_int_disable_e1h(bp, func);
9065 else
9066 bnx2x_int_disable(bp);
9067}
9068
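/* UNDI is the pre-boot (PXE) network driver interface. If it left the
 * device initialized, the chip must be quiesced and reset here before
 * the regular driver takes over.
 */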
9069static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9070{
9071 u32 val;
9072
9073 /* Check if there is any driver already loaded */
9074 val = REG_RD(bp, MISC_REG_UNPREPARED);
9075 if (val == 0x1) {
9076		/* Check if it is the UNDI driver:
9077		 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
9078 */
9079 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9080 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9081 if (val == 0x7) {
9082 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9083 /* save our func */
9084 int func = BP_FUNC(bp);
9085 u32 swap_en;
9086 u32 swap_val;
9087
9088 /* clear the UNDI indication */
9089 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9090
9091 BNX2X_DEV_INFO("UNDI is active! reset device\n");
9092
9093			/* try to unload UNDI on port 0 */
9094 bp->func = 0;
9095 bp->fw_seq =
9096 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9097 DRV_MSG_SEQ_NUMBER_MASK);
9098 reset_code = bnx2x_fw_command(bp, reset_code);
9099
9100 /* if UNDI is loaded on the other port */
9101 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9102
9103 /* send "DONE" for previous unload */
9104 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9105
9106 /* unload UNDI on port 1 */
9107 bp->func = 1;
9108 bp->fw_seq =
9109 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9110 DRV_MSG_SEQ_NUMBER_MASK);
9111 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9112
9113 bnx2x_fw_command(bp, reset_code);
9114 }
9115
9116 /* now it's safe to release the lock */
9117 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9118
9119 bnx2x_undi_int_disable(bp, func);
9120
9121			/* close input traffic and wait for it to drain */
9122 /* Do not rcv packets to BRB */
9123 REG_WR(bp,
9124 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9125 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9126 /* Do not direct rcv packets that are not for MCP to
9127 * the BRB */
9128 REG_WR(bp,
9129 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9130 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9131 /* clear AEU */
9132 REG_WR(bp,
9133 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9134 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9135 msleep(10);
9136
9137 /* save NIG port swap info */
9138 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9139 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9140 /* reset device */
9141 REG_WR(bp,
9142 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9143 0xd3ffffff);
9144 REG_WR(bp,
9145 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9146 0x1403);
9147 /* take the NIG out of reset and restore swap values */
9148 REG_WR(bp,
9149 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9150 MISC_REGISTERS_RESET_REG_1_RST_NIG);
9151 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9152 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9153
9154 /* send unload done to the MCP */
9155 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9156
9157 /* restore our func and fw_seq */
9158 bp->func = func;
9159 bp->fw_seq =
9160 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9161 DRV_MSG_SEQ_NUMBER_MASK);
9162
9163 } else
9164 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9165 }
9166}
9167
9168static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9169{
9170 u32 val, val2, val3, val4, id;
9171 u16 pmc;
9172
9173 /* Get the chip revision id and number. */
9174 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9175 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9176 id = ((val & 0xffff) << 16);
9177 val = REG_RD(bp, MISC_REG_CHIP_REV);
9178 id |= ((val & 0xf) << 12);
9179 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9180 id |= ((val & 0xff) << 4);
9181 val = REG_RD(bp, MISC_REG_BOND_ID);
9182 id |= (val & 0xf);
9183 bp->common.chip_id = id;
9184 bp->link_params.chip_id = bp->common.chip_id;
9185 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9186
9187 val = (REG_RD(bp, 0x2874) & 0x55);
9188 if ((bp->common.chip_id & 0x1) ||
9189 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9190 bp->flags |= ONE_PORT_FLAG;
9191 BNX2X_DEV_INFO("single port device\n");
9192 }
9193
9194 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9195 bp->common.flash_size = (NVRAM_1MB_SIZE <<
9196 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9197 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9198 bp->common.flash_size, bp->common.flash_size);
9199
9200 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9201 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9202 bp->link_params.shmem_base = bp->common.shmem_base;
9203 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9204 bp->common.shmem_base, bp->common.shmem2_base);
9205
9206 if (!bp->common.shmem_base ||
9207 (bp->common.shmem_base < 0xA0000) ||
9208 (bp->common.shmem_base >= 0xC0000)) {
9209 BNX2X_DEV_INFO("MCP not active\n");
9210 bp->flags |= NO_MCP_FLAG;
9211 return;
9212 }
9213
9214 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9215 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9216 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9217 BNX2X_ERROR("BAD MCP validity signature\n");
9218
9219 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9220 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9221
9222 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9223 SHARED_HW_CFG_LED_MODE_MASK) >>
9224 SHARED_HW_CFG_LED_MODE_SHIFT);
9225
9226 bp->link_params.feature_config_flags = 0;
9227 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9228 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9229 bp->link_params.feature_config_flags |=
9230 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9231 else
9232 bp->link_params.feature_config_flags &=
9233 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9234
9235 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9236 bp->common.bc_ver = val;
9237 BNX2X_DEV_INFO("bc_ver %X\n", val);
9238 if (val < BNX2X_BC_VER) {
9239 /* for now only warn
9240 * later we might need to enforce this */
9241 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9242 "please upgrade BC\n", BNX2X_BC_VER, val);
9243 }
9244 bp->link_params.feature_config_flags |=
9245 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9246 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9247
9248 if (BP_E1HVN(bp) == 0) {
9249 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9250 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9251 } else {
9252 /* no WOL capability for E1HVN != 0 */
9253 bp->flags |= NO_WOL_FLAG;
9254 }
9255 BNX2X_DEV_INFO("%sWoL capable\n",
9256 (bp->flags & NO_WOL_FLAG) ? "not " : "");
9257
9258 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9259 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9260 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9261 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9262
9263 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9264 val, val2, val3, val4);
9265}
9266
9267static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9268 u32 switch_cfg)
9269{
9270 int port = BP_PORT(bp);
9271 u32 ext_phy_type;
9272
9273 switch (switch_cfg) {
9274 case SWITCH_CFG_1G:
9275 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9276
9277 ext_phy_type =
9278 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9279 switch (ext_phy_type) {
9280 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9281 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9282 ext_phy_type);
9283
9284 bp->port.supported |= (SUPPORTED_10baseT_Half |
9285 SUPPORTED_10baseT_Full |
9286 SUPPORTED_100baseT_Half |
9287 SUPPORTED_100baseT_Full |
9288 SUPPORTED_1000baseT_Full |
9289 SUPPORTED_2500baseX_Full |
9290 SUPPORTED_TP |
9291 SUPPORTED_FIBRE |
9292 SUPPORTED_Autoneg |
9293 SUPPORTED_Pause |
9294 SUPPORTED_Asym_Pause);
9295 break;
9296
9297 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9298 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9299 ext_phy_type);
9300
9301 bp->port.supported |= (SUPPORTED_10baseT_Half |
9302 SUPPORTED_10baseT_Full |
9303 SUPPORTED_100baseT_Half |
9304 SUPPORTED_100baseT_Full |
9305 SUPPORTED_1000baseT_Full |
9306 SUPPORTED_TP |
9307 SUPPORTED_FIBRE |
9308 SUPPORTED_Autoneg |
9309 SUPPORTED_Pause |
9310 SUPPORTED_Asym_Pause);
9311 break;
9312
9313 default:
9314 BNX2X_ERR("NVRAM config error. "
9315 "BAD SerDes ext_phy_config 0x%x\n",
9316 bp->link_params.ext_phy_config);
9317 return;
9318 }
9319
9320 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9321 port*0x10);
9322 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9323 break;
9324
9325 case SWITCH_CFG_10G:
9326 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9327
9328 ext_phy_type =
9329 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9330 switch (ext_phy_type) {
9331 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9332 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9333 ext_phy_type);
9334
9335 bp->port.supported |= (SUPPORTED_10baseT_Half |
9336 SUPPORTED_10baseT_Full |
9337 SUPPORTED_100baseT_Half |
9338 SUPPORTED_100baseT_Full |
9339 SUPPORTED_1000baseT_Full |
9340 SUPPORTED_2500baseX_Full |
9341 SUPPORTED_10000baseT_Full |
9342 SUPPORTED_TP |
9343 SUPPORTED_FIBRE |
9344 SUPPORTED_Autoneg |
9345 SUPPORTED_Pause |
9346 SUPPORTED_Asym_Pause);
9347 break;
9348
9349 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9350 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9351 ext_phy_type);
9352
9353 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9354 SUPPORTED_1000baseT_Full |
9355 SUPPORTED_FIBRE |
9356 SUPPORTED_Autoneg |
9357 SUPPORTED_Pause |
9358 SUPPORTED_Asym_Pause);
9359 break;
9360
9361 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9362 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9363 ext_phy_type);
9364
9365 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9366 SUPPORTED_2500baseX_Full |
9367 SUPPORTED_1000baseT_Full |
9368 SUPPORTED_FIBRE |
9369 SUPPORTED_Autoneg |
9370 SUPPORTED_Pause |
9371 SUPPORTED_Asym_Pause);
9372 break;
9373
9374 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9375 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9376 ext_phy_type);
9377
9378 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9379 SUPPORTED_FIBRE |
9380 SUPPORTED_Pause |
9381 SUPPORTED_Asym_Pause);
9382 break;
9383
9384 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9385 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9386 ext_phy_type);
9387
9388 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9389 SUPPORTED_1000baseT_Full |
9390 SUPPORTED_FIBRE |
9391 SUPPORTED_Pause |
9392 SUPPORTED_Asym_Pause);
9393 break;
9394
9395 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9396 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9397 ext_phy_type);
9398
9399 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9400 SUPPORTED_1000baseT_Full |
9401 SUPPORTED_Autoneg |
9402 SUPPORTED_FIBRE |
9403 SUPPORTED_Pause |
9404 SUPPORTED_Asym_Pause);
9405 break;
9406
9407 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9408 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9409 ext_phy_type);
9410
9411 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9412 SUPPORTED_1000baseT_Full |
9413 SUPPORTED_Autoneg |
9414 SUPPORTED_FIBRE |
9415 SUPPORTED_Pause |
9416 SUPPORTED_Asym_Pause);
9417 break;
9418
9419 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9420 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9421 ext_phy_type);
9422
9423 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9424 SUPPORTED_TP |
9425 SUPPORTED_Autoneg |
9426 SUPPORTED_Pause |
9427 SUPPORTED_Asym_Pause);
9428 break;
9429
9430 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9431 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9432 ext_phy_type);
9433
9434 bp->port.supported |= (SUPPORTED_10baseT_Half |
9435 SUPPORTED_10baseT_Full |
9436 SUPPORTED_100baseT_Half |
9437 SUPPORTED_100baseT_Full |
9438 SUPPORTED_1000baseT_Full |
9439 SUPPORTED_10000baseT_Full |
9440 SUPPORTED_TP |
9441 SUPPORTED_Autoneg |
9442 SUPPORTED_Pause |
9443 SUPPORTED_Asym_Pause);
9444 break;
9445
9446 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9447 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9448 bp->link_params.ext_phy_config);
9449 break;
9450
9451 default:
9452 BNX2X_ERR("NVRAM config error. "
9453 "BAD XGXS ext_phy_config 0x%x\n",
9454 bp->link_params.ext_phy_config);
9455 return;
9456 }
9457
9458 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9459 port*0x18);
9460 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9461
9462 break;
9463
9464 default:
9465 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9466 bp->port.link_config);
9467 return;
9468 }
9469 bp->link_params.phy_addr = bp->port.phy_addr;
9470
9471 /* mask what we support according to speed_cap_mask */
9472 if (!(bp->link_params.speed_cap_mask &
9473 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9474 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9475
9476 if (!(bp->link_params.speed_cap_mask &
9477 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9478 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9479
9480 if (!(bp->link_params.speed_cap_mask &
9481 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9482 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9483
9484 if (!(bp->link_params.speed_cap_mask &
9485 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9486 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9487
9488 if (!(bp->link_params.speed_cap_mask &
9489 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9490 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9491 SUPPORTED_1000baseT_Full);
9492
9493 if (!(bp->link_params.speed_cap_mask &
9494 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9495 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9496
9497 if (!(bp->link_params.speed_cap_mask &
9498 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9499 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9500
9501 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9502}
9503
9504static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9505{
9506 bp->link_params.req_duplex = DUPLEX_FULL;
9507
9508 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9509 case PORT_FEATURE_LINK_SPEED_AUTO:
9510 if (bp->port.supported & SUPPORTED_Autoneg) {
9511 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9512 bp->port.advertising = bp->port.supported;
9513 } else {
9514 u32 ext_phy_type =
9515 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9516
9517 if ((ext_phy_type ==
9518 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9519 (ext_phy_type ==
9520 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9521 /* force 10G, no AN */
9522 bp->link_params.req_line_speed = SPEED_10000;
9523 bp->port.advertising =
9524 (ADVERTISED_10000baseT_Full |
9525 ADVERTISED_FIBRE);
9526 break;
9527 }
9528 BNX2X_ERR("NVRAM config error. "
9529 "Invalid link_config 0x%x"
9530 " Autoneg not supported\n",
9531 bp->port.link_config);
9532 return;
9533 }
9534 break;
9535
9536 case PORT_FEATURE_LINK_SPEED_10M_FULL:
9537 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9538 bp->link_params.req_line_speed = SPEED_10;
9539 bp->port.advertising = (ADVERTISED_10baseT_Full |
9540 ADVERTISED_TP);
9541 } else {
9542 BNX2X_ERROR("NVRAM config error. "
9543 "Invalid link_config 0x%x"
9544 " speed_cap_mask 0x%x\n",
9545 bp->port.link_config,
9546 bp->link_params.speed_cap_mask);
9547 return;
9548 }
9549 break;
9550
9551 case PORT_FEATURE_LINK_SPEED_10M_HALF:
9552 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9553 bp->link_params.req_line_speed = SPEED_10;
9554 bp->link_params.req_duplex = DUPLEX_HALF;
9555 bp->port.advertising = (ADVERTISED_10baseT_Half |
9556 ADVERTISED_TP);
9557 } else {
9558 BNX2X_ERROR("NVRAM config error. "
9559 "Invalid link_config 0x%x"
9560 " speed_cap_mask 0x%x\n",
9561 bp->port.link_config,
9562 bp->link_params.speed_cap_mask);
9563 return;
9564 }
9565 break;
9566
9567 case PORT_FEATURE_LINK_SPEED_100M_FULL:
9568 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9569 bp->link_params.req_line_speed = SPEED_100;
9570 bp->port.advertising = (ADVERTISED_100baseT_Full |
9571 ADVERTISED_TP);
9572 } else {
9573 BNX2X_ERROR("NVRAM config error. "
9574 "Invalid link_config 0x%x"
9575 " speed_cap_mask 0x%x\n",
9576 bp->port.link_config,
9577 bp->link_params.speed_cap_mask);
9578 return;
9579 }
9580 break;
9581
9582 case PORT_FEATURE_LINK_SPEED_100M_HALF:
9583 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9584 bp->link_params.req_line_speed = SPEED_100;
9585 bp->link_params.req_duplex = DUPLEX_HALF;
9586 bp->port.advertising = (ADVERTISED_100baseT_Half |
9587 ADVERTISED_TP);
9588 } else {
9589 BNX2X_ERROR("NVRAM config error. "
9590 "Invalid link_config 0x%x"
9591 " speed_cap_mask 0x%x\n",
9592 bp->port.link_config,
9593 bp->link_params.speed_cap_mask);
9594 return;
9595 }
9596 break;
9597
9598 case PORT_FEATURE_LINK_SPEED_1G:
9599 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9600 bp->link_params.req_line_speed = SPEED_1000;
9601 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9602 ADVERTISED_TP);
9603 } else {
9604 BNX2X_ERROR("NVRAM config error. "
9605 "Invalid link_config 0x%x"
9606 " speed_cap_mask 0x%x\n",
9607 bp->port.link_config,
9608 bp->link_params.speed_cap_mask);
9609 return;
9610 }
9611 break;
9612
9613 case PORT_FEATURE_LINK_SPEED_2_5G:
9614 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9615 bp->link_params.req_line_speed = SPEED_2500;
9616 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9617 ADVERTISED_TP);
9618 } else {
9619 BNX2X_ERROR("NVRAM config error. "
9620 "Invalid link_config 0x%x"
9621 " speed_cap_mask 0x%x\n",
9622 bp->port.link_config,
9623 bp->link_params.speed_cap_mask);
9624 return;
9625 }
9626 break;
9627
9628 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9629 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9630 case PORT_FEATURE_LINK_SPEED_10G_KR:
9631 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9632 bp->link_params.req_line_speed = SPEED_10000;
9633 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9634 ADVERTISED_FIBRE);
9635 } else {
9636 BNX2X_ERROR("NVRAM config error. "
9637 "Invalid link_config 0x%x"
9638 " speed_cap_mask 0x%x\n",
9639 bp->port.link_config,
9640 bp->link_params.speed_cap_mask);
9641 return;
9642 }
9643 break;
9644
9645 default:
9646 BNX2X_ERROR("NVRAM config error. "
9647 "BAD link speed link_config 0x%x\n",
9648 bp->port.link_config);
9649 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9650 bp->port.advertising = bp->port.supported;
9651 break;
9652 }
9653
9654 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9655 PORT_FEATURE_FLOW_CONTROL_MASK);
9656 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9657 !(bp->port.supported & SUPPORTED_Autoneg))
9658 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9659
9660 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
9661 " advertising 0x%x\n",
9662 bp->link_params.req_line_speed,
9663 bp->link_params.req_duplex,
9664 bp->link_params.req_flow_ctrl, bp->port.advertising);
9665}
9666
9667static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9668{
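	/* mac_hi holds bytes 0-1 and mac_lo bytes 2-5 of the MAC address;
	 * convert both to big endian so mac_buf ends up in network order */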
9669 mac_hi = cpu_to_be16(mac_hi);
9670 mac_lo = cpu_to_be32(mac_lo);
9671 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9672 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9673}
9674
9675static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9676{
9677 int port = BP_PORT(bp);
9678 u32 val, val2;
9679 u32 config;
9680 u16 i;
9681 u32 ext_phy_type;
9682
9683 bp->link_params.bp = bp;
9684 bp->link_params.port = port;
9685
9686 bp->link_params.lane_config =
9687 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9688 bp->link_params.ext_phy_config =
9689 SHMEM_RD(bp,
9690 dev_info.port_hw_config[port].external_phy_config);
9691 /* BCM8727_NOC => BCM8727 no over current */
9692 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9693 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9694 bp->link_params.ext_phy_config &=
9695 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9696 bp->link_params.ext_phy_config |=
9697 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9698 bp->link_params.feature_config_flags |=
9699 FEATURE_CONFIG_BCM8727_NOC;
9700 }
9701
9702 bp->link_params.speed_cap_mask =
9703 SHMEM_RD(bp,
9704 dev_info.port_hw_config[port].speed_capability_mask);
9705
9706 bp->port.link_config =
9707 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9708
9709 /* Get the 4 lanes xgxs config rx and tx */
9710 for (i = 0; i < 2; i++) {
9711 val = SHMEM_RD(bp,
9712 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9713 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9714 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9715
9716 val = SHMEM_RD(bp,
9717 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9718 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9719 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9720 }
9721
9722 /* If the device is capable of WoL, set the default state according
9723 * to the HW
9724 */
9725 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9726 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9727 (config & PORT_FEATURE_WOL_ENABLED));
9728
9729 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
9730 " speed_cap_mask 0x%08x link_config 0x%08x\n",
9731 bp->link_params.lane_config,
9732 bp->link_params.ext_phy_config,
9733 bp->link_params.speed_cap_mask, bp->port.link_config);
9734
9735 bp->link_params.switch_cfg |= (bp->port.link_config &
9736 PORT_FEATURE_CONNECTED_SWITCH_MASK);
9737 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9738
9739 bnx2x_link_settings_requested(bp);
9740
9741 /*
9742	 * If connected directly, work with the internal PHY; otherwise, work
9743 * with the external PHY
9744 */
9745 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9746 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9747 bp->mdio.prtad = bp->link_params.phy_addr;
9748
9749 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9750 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9751 bp->mdio.prtad =
9752 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9753
9754 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9755 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9756 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9757 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9758 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9759
9760#ifdef BCM_CNIC
9761 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9762 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9763 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9764#endif
9765}
9766
9767static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9768{
9769 int func = BP_FUNC(bp);
9770 u32 val, val2;
9771 int rc = 0;
9772
9773 bnx2x_get_common_hwinfo(bp);
9774
9775 bp->e1hov = 0;
9776 bp->e1hmf = 0;
9777 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
9778 bp->mf_config =
9779 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9780
9781 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9782 FUNC_MF_CFG_E1HOV_TAG_MASK);
9783 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9784 bp->e1hmf = 1;
9785 BNX2X_DEV_INFO("%s function mode\n",
9786 IS_E1HMF(bp) ? "multi" : "single");
9787
9788 if (IS_E1HMF(bp)) {
9789 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9790 e1hov_tag) &
9791 FUNC_MF_CFG_E1HOV_TAG_MASK);
9792 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9793 bp->e1hov = val;
9794 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9795 "(0x%04x)\n",
9796 func, bp->e1hov, bp->e1hov);
9797 } else {
9798 BNX2X_ERROR("No valid E1HOV for func %d,"
9799 " aborting\n", func);
9800 rc = -EPERM;
9801 }
9802 } else {
9803 if (BP_E1HVN(bp)) {
9804 BNX2X_ERROR("VN %d in single function mode,"
9805 " aborting\n", BP_E1HVN(bp));
9806 rc = -EPERM;
9807 }
9808 }
9809 }
9810
9811 if (!BP_NOMCP(bp)) {
9812 bnx2x_get_port_hwinfo(bp);
9813
9814 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9815 DRV_MSG_SEQ_NUMBER_MASK);
9816 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9817 }
9818
9819 if (IS_E1HMF(bp)) {
9820 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9821 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
9822 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9823 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9824 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9825 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9826 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9827 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9828 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
9829 bp->dev->dev_addr[5] = (u8)(val & 0xff);
9830 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9831 ETH_ALEN);
9832 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9833 ETH_ALEN);
9834 }
9835
9836 return rc;
9837 }
9838
9839 if (BP_NOMCP(bp)) {
9840 /* only supposed to happen on emulation/FPGA */
9841 BNX2X_ERROR("warning: random MAC workaround active\n");
9842 random_ether_addr(bp->dev->dev_addr);
9843 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9844 }
9845
9846 return rc;
9847}
9848
9849static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9850{
9851 int cnt, i, block_end, rodi;
9852 char vpd_data[BNX2X_VPD_LEN+1];
9853 char str_id_reg[VENDOR_ID_LEN+1];
9854 char str_id_cap[VENDOR_ID_LEN+1];
9855 u8 len;
9856
9857 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9858 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9859
9860 if (cnt < BNX2X_VPD_LEN)
9861 goto out_not_found;
9862
9863 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9864 PCI_VPD_LRDT_RO_DATA);
9865 if (i < 0)
9866 goto out_not_found;
9867
9868
9869 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9870 pci_vpd_lrdt_size(&vpd_data[i]);
9871
9872 i += PCI_VPD_LRDT_TAG_SIZE;
9873
9874 if (block_end > BNX2X_VPD_LEN)
9875 goto out_not_found;
9876
9877 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9878 PCI_VPD_RO_KEYWORD_MFR_ID);
9879 if (rodi < 0)
9880 goto out_not_found;
9881
9882 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9883
9884 if (len != VENDOR_ID_LEN)
9885 goto out_not_found;
9886
9887 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9888
9889 /* vendor specific info */
9890 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9891 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9892 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9893 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9894
9895 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9896 PCI_VPD_RO_KEYWORD_VENDOR0);
9897 if (rodi >= 0) {
9898 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9899
9900 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9901
9902 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9903 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9904 bp->fw_ver[len] = ' ';
9905 }
9906 }
9907 return;
9908 }
9909out_not_found:
9910 return;
9911}
9912
9913static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9914{
9915 int func = BP_FUNC(bp);
9916 int timer_interval;
9917 int rc;
9918
9919 /* Disable interrupt handling until HW is initialized */
9920 atomic_set(&bp->intr_sem, 1);
9921 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
9922
9923 mutex_init(&bp->port.phy_mutex);
9924 mutex_init(&bp->fw_mb_mutex);
9925#ifdef BCM_CNIC
9926 mutex_init(&bp->cnic_mutex);
9927#endif
9928
9929 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9930 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9931
9932 rc = bnx2x_get_hwinfo(bp);
9933
9934 bnx2x_read_fwinfo(bp);
9935	/* need to reset the chip if UNDI was active */
9936 if (!BP_NOMCP(bp))
9937 bnx2x_undi_unload(bp);
9938
9939 if (CHIP_REV_IS_FPGA(bp))
9940 dev_err(&bp->pdev->dev, "FPGA detected\n");
9941
9942 if (BP_NOMCP(bp) && (func == 0))
9943 dev_err(&bp->pdev->dev, "MCP disabled, "
9944 "must load devices in order!\n");
9945
9946 /* Set multi queue mode */
9947 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9948 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9949 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9950 "requested is not MSI-X\n");
9951 multi_mode = ETH_RSS_MODE_DISABLED;
9952 }
9953 bp->multi_mode = multi_mode;
9954
9955
9956 bp->dev->features |= NETIF_F_GRO;
9957
9958 /* Set TPA flags */
9959 if (disable_tpa) {
9960 bp->flags &= ~TPA_ENABLE_FLAG;
9961 bp->dev->features &= ~NETIF_F_LRO;
9962 } else {
9963 bp->flags |= TPA_ENABLE_FLAG;
9964 bp->dev->features |= NETIF_F_LRO;
9965 }
9966
9967 if (CHIP_IS_E1(bp))
9968 bp->dropless_fc = 0;
9969 else
9970 bp->dropless_fc = dropless_fc;
9971
9972 bp->mrrs = mrrs;
9973
9974 bp->tx_ring_size = MAX_TX_AVAIL;
9975 bp->rx_ring_size = MAX_RX_AVAIL;
9976
9977 bp->rx_csum = 1;
9978
9979 /* make sure that the numbers are in the right granularity */
9980 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9981 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9982
9983 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9984 bp->current_interval = (poll ? poll : timer_interval);
9985
9986 init_timer(&bp->timer);
9987 bp->timer.expires = jiffies + bp->current_interval;
9988 bp->timer.data = (unsigned long) bp;
9989 bp->timer.function = bnx2x_timer;
9990
9991 return rc;
9992}
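/* Note on the coalescing defaults above: the 50us/25us values are rounded
 * down to a multiple of (4 * BNX2X_BTR) so they land on the timer
 * granularity the chip can actually program.  For illustration, if
 * BNX2X_BTR were 1 the granularity is 4us and the defaults become
 * tx_ticks = 48, rx_ticks = 24.
 */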
9993
9994/*
9995 * ethtool service functions
9996 */
9997
9998/* All ethtool functions called with rtnl_lock */
9999
10000static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10001{
10002 struct bnx2x *bp = netdev_priv(dev);
10003
10004 cmd->supported = bp->port.supported;
10005 cmd->advertising = bp->port.advertising;
10006
10007 if ((bp->state == BNX2X_STATE_OPEN) &&
10008 !(bp->flags & MF_FUNC_DIS) &&
10009 (bp->link_vars.link_up)) {
10010 cmd->speed = bp->link_vars.line_speed;
10011 cmd->duplex = bp->link_vars.duplex;
10012 if (IS_E1HMF(bp)) {
10013 u16 vn_max_rate;
10014
10015 vn_max_rate =
10016 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
10017 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
10018 if (vn_max_rate < cmd->speed)
10019 cmd->speed = vn_max_rate;
10020 }
10021 } else {
10022 cmd->speed = -1;
10023 cmd->duplex = -1;
10024 }
10025
10026 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10027 u32 ext_phy_type =
10028 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10029
10030 switch (ext_phy_type) {
10031 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
10032 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
10033 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
10034 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10035 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10036 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
10037 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
10038 cmd->port = PORT_FIBRE;
10039 break;
10040
10041 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
10042 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
10043 cmd->port = PORT_TP;
10044 break;
10045
10046 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10047 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10048 bp->link_params.ext_phy_config);
10049 break;
10050
10051 default:
10052 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
10053 bp->link_params.ext_phy_config);
10054 break;
10055 }
10056 } else
10057 cmd->port = PORT_TP;
10058
10059 cmd->phy_address = bp->mdio.prtad;
10060 cmd->transceiver = XCVR_INTERNAL;
10061
10062 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10063 cmd->autoneg = AUTONEG_ENABLE;
10064 else
10065 cmd->autoneg = AUTONEG_DISABLE;
10066
10067 cmd->maxtxpkt = 0;
10068 cmd->maxrxpkt = 0;
10069
10070 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10071 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10072 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10073 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10074 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10075 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10076 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10077
10078 return 0;
10079}
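/* In E1H multi-function mode the speed reported above is clamped to the
 * per-function maximum bandwidth taken from mf_config; that field is in
 * units of 100 Mbps, so e.g. a MAX_BW value of 25 caps the reported speed
 * at 2500 even when the physical link runs at 10000.
 */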
10080
10081static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10082{
10083 struct bnx2x *bp = netdev_priv(dev);
10084 u32 advertising;
10085
10086 if (IS_E1HMF(bp))
10087 return 0;
10088
10089 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10090 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10091 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10092 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10093 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10094 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10095 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10096
10097 if (cmd->autoneg == AUTONEG_ENABLE) {
10098 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10099 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10100 return -EINVAL;
10101 }
10102
10103 /* advertise the requested speed and duplex if supported */
10104 cmd->advertising &= bp->port.supported;
10105
10106 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10107 bp->link_params.req_duplex = DUPLEX_FULL;
10108 bp->port.advertising |= (ADVERTISED_Autoneg |
10109 cmd->advertising);
10110
10111 } else { /* forced speed */
10112 /* advertise the requested speed and duplex if supported */
10113 switch (cmd->speed) {
10114 case SPEED_10:
10115 if (cmd->duplex == DUPLEX_FULL) {
10116 if (!(bp->port.supported &
10117 SUPPORTED_10baseT_Full)) {
10118 DP(NETIF_MSG_LINK,
10119 "10M full not supported\n");
10120 return -EINVAL;
10121 }
10122
10123 advertising = (ADVERTISED_10baseT_Full |
10124 ADVERTISED_TP);
10125 } else {
10126 if (!(bp->port.supported &
10127 SUPPORTED_10baseT_Half)) {
10128 DP(NETIF_MSG_LINK,
10129 "10M half not supported\n");
10130 return -EINVAL;
10131 }
10132
10133 advertising = (ADVERTISED_10baseT_Half |
10134 ADVERTISED_TP);
10135 }
10136 break;
10137
10138 case SPEED_100:
10139 if (cmd->duplex == DUPLEX_FULL) {
10140 if (!(bp->port.supported &
10141 SUPPORTED_100baseT_Full)) {
10142 DP(NETIF_MSG_LINK,
10143 "100M full not supported\n");
10144 return -EINVAL;
10145 }
10146
10147 advertising = (ADVERTISED_100baseT_Full |
10148 ADVERTISED_TP);
10149 } else {
10150 if (!(bp->port.supported &
10151 SUPPORTED_100baseT_Half)) {
10152 DP(NETIF_MSG_LINK,
10153 "100M half not supported\n");
10154 return -EINVAL;
10155 }
10156
10157 advertising = (ADVERTISED_100baseT_Half |
10158 ADVERTISED_TP);
10159 }
10160 break;
10161
10162 case SPEED_1000:
10163 if (cmd->duplex != DUPLEX_FULL) {
10164 DP(NETIF_MSG_LINK, "1G half not supported\n");
10165 return -EINVAL;
10166 }
10167
10168 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10169 DP(NETIF_MSG_LINK, "1G full not supported\n");
10170 return -EINVAL;
10171 }
10172
10173 advertising = (ADVERTISED_1000baseT_Full |
10174 ADVERTISED_TP);
10175 break;
10176
10177 case SPEED_2500:
10178 if (cmd->duplex != DUPLEX_FULL) {
10179 DP(NETIF_MSG_LINK,
10180 "2.5G half not supported\n");
10181 return -EINVAL;
10182 }
10183
10184 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10185 DP(NETIF_MSG_LINK,
10186 "2.5G full not supported\n");
10187 return -EINVAL;
10188 }
10189
10190 advertising = (ADVERTISED_2500baseX_Full |
10191 ADVERTISED_TP);
10192 break;
10193
10194 case SPEED_10000:
10195 if (cmd->duplex != DUPLEX_FULL) {
10196 DP(NETIF_MSG_LINK, "10G half not supported\n");
10197 return -EINVAL;
10198 }
10199
10200 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10201 DP(NETIF_MSG_LINK, "10G full not supported\n");
10202 return -EINVAL;
10203 }
10204
10205 advertising = (ADVERTISED_10000baseT_Full |
10206 ADVERTISED_FIBRE);
10207 break;
10208
10209 default:
10210 DP(NETIF_MSG_LINK, "Unsupported speed\n");
10211 return -EINVAL;
10212 }
10213
10214 bp->link_params.req_line_speed = cmd->speed;
10215 bp->link_params.req_duplex = cmd->duplex;
10216 bp->port.advertising = advertising;
10217 }
10218
10219 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10220 DP_LEVEL " req_duplex %d advertising 0x%x\n",
10221 bp->link_params.req_line_speed, bp->link_params.req_duplex,
10222 bp->port.advertising);
10223
10224 if (netif_running(dev)) {
10225 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10226 bnx2x_link_set(bp);
10227 }
10228
10229 return 0;
10230}
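/* Illustrative userspace mapping (interface name is just an example):
 *
 *   ethtool -s eth0 autoneg on                      -> SPEED_AUTO_NEG path
 *   ethtool -s eth0 speed 10000 duplex full autoneg off
 *                                                   -> forced SPEED_10000 path
 *
 * Forced 10G advertises 10000baseT_Full + FIBRE; any speed/duplex the PHY
 * does not report in bp->port.supported is rejected with -EINVAL.
 */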
10231
10232#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10233#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10234
10235static int bnx2x_get_regs_len(struct net_device *dev)
10236{
10237 struct bnx2x *bp = netdev_priv(dev);
10238 int regdump_len = 0;
10239 int i;
10240
10241 if (CHIP_IS_E1(bp)) {
10242 for (i = 0; i < REGS_COUNT; i++)
10243 if (IS_E1_ONLINE(reg_addrs[i].info))
10244 regdump_len += reg_addrs[i].size;
10245
10246 for (i = 0; i < WREGS_COUNT_E1; i++)
10247 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10248 regdump_len += wreg_addrs_e1[i].size *
10249 (1 + wreg_addrs_e1[i].read_regs_count);
10250
10251 } else { /* E1H */
10252 for (i = 0; i < REGS_COUNT; i++)
10253 if (IS_E1H_ONLINE(reg_addrs[i].info))
10254 regdump_len += reg_addrs[i].size;
10255
10256 for (i = 0; i < WREGS_COUNT_E1H; i++)
10257 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10258 regdump_len += wreg_addrs_e1h[i].size *
10259 (1 + wreg_addrs_e1h[i].read_regs_count);
10260 }
10261 regdump_len *= 4;
10262 regdump_len += sizeof(struct dump_hdr);
10263
10264 return regdump_len;
10265}
10266
10267static void bnx2x_get_regs(struct net_device *dev,
10268 struct ethtool_regs *regs, void *_p)
10269{
10270 u32 *p = _p, i, j;
10271 struct bnx2x *bp = netdev_priv(dev);
10272 struct dump_hdr dump_hdr = {0};
10273
10274 regs->version = 0;
10275 memset(p, 0, regs->len);
10276
10277 if (!netif_running(bp->dev))
10278 return;
10279
10280 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10281 dump_hdr.dump_sign = dump_sign_all;
10282 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10283 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10284 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10285 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10286 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10287
10288 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10289 p += dump_hdr.hdr_size + 1;
10290
10291 if (CHIP_IS_E1(bp)) {
10292 for (i = 0; i < REGS_COUNT; i++)
10293 if (IS_E1_ONLINE(reg_addrs[i].info))
10294 for (j = 0; j < reg_addrs[i].size; j++)
10295 *p++ = REG_RD(bp,
10296 reg_addrs[i].addr + j*4);
10297
10298 } else { /* E1H */
10299 for (i = 0; i < REGS_COUNT; i++)
10300 if (IS_E1H_ONLINE(reg_addrs[i].info))
10301 for (j = 0; j < reg_addrs[i].size; j++)
10302 *p++ = REG_RD(bp,
10303 reg_addrs[i].addr + j*4);
10304 }
10305}
10306
10307#define PHY_FW_VER_LEN 10
10308
10309static void bnx2x_get_drvinfo(struct net_device *dev,
10310 struct ethtool_drvinfo *info)
10311{
10312 struct bnx2x *bp = netdev_priv(dev);
10313 u8 phy_fw_ver[PHY_FW_VER_LEN];
10314
10315 strcpy(info->driver, DRV_MODULE_NAME);
10316 strcpy(info->version, DRV_MODULE_VERSION);
10317
10318 phy_fw_ver[0] = '\0';
10319 if (bp->port.pmf) {
10320 bnx2x_acquire_phy_lock(bp);
10321 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10322 (bp->state != BNX2X_STATE_CLOSED),
10323 phy_fw_ver, PHY_FW_VER_LEN);
10324 bnx2x_release_phy_lock(bp);
10325 }
10326
10327 strncpy(info->fw_version, bp->fw_ver, 32);
10328 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10329 "bc %d.%d.%d%s%s",
10330 (bp->common.bc_ver & 0xff0000) >> 16,
10331 (bp->common.bc_ver & 0xff00) >> 8,
10332 (bp->common.bc_ver & 0xff),
10333 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10334 strcpy(info->bus_info, pci_name(bp->pdev));
10335 info->n_stats = BNX2X_NUM_STATS;
10336 info->testinfo_len = BNX2X_NUM_TESTS;
10337 info->eedump_len = bp->common.flash_size;
10338 info->regdump_len = bnx2x_get_regs_len(dev);
10339}
10340
10341static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10342{
10343 struct bnx2x *bp = netdev_priv(dev);
10344
10345 if (bp->flags & NO_WOL_FLAG) {
10346 wol->supported = 0;
10347 wol->wolopts = 0;
10348 } else {
10349 wol->supported = WAKE_MAGIC;
10350 if (bp->wol)
10351 wol->wolopts = WAKE_MAGIC;
10352 else
10353 wol->wolopts = 0;
10354 }
10355 memset(&wol->sopass, 0, sizeof(wol->sopass));
10356}
10357
10358static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10359{
10360 struct bnx2x *bp = netdev_priv(dev);
10361
10362 if (wol->wolopts & ~WAKE_MAGIC)
10363 return -EINVAL;
10364
10365 if (wol->wolopts & WAKE_MAGIC) {
10366 if (bp->flags & NO_WOL_FLAG)
10367 return -EINVAL;
10368
10369 bp->wol = 1;
10370 } else
10371 bp->wol = 0;
10372
10373 return 0;
10374}
10375
10376static u32 bnx2x_get_msglevel(struct net_device *dev)
10377{
10378 struct bnx2x *bp = netdev_priv(dev);
10379
10380 return bp->msg_enable;
10381}
10382
10383static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10384{
10385 struct bnx2x *bp = netdev_priv(dev);
10386
10387 if (capable(CAP_NET_ADMIN))
10388 bp->msg_enable = level;
10389}
10390
10391static int bnx2x_nway_reset(struct net_device *dev)
10392{
10393 struct bnx2x *bp = netdev_priv(dev);
10394
10395 if (!bp->port.pmf)
10396 return 0;
10397
10398 if (netif_running(dev)) {
10399 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10400 bnx2x_link_set(bp);
10401 }
10402
10403 return 0;
10404}
10405
10406static u32 bnx2x_get_link(struct net_device *dev)
10407{
10408 struct bnx2x *bp = netdev_priv(dev);
10409
10410 if (bp->flags & MF_FUNC_DIS)
10411 return 0;
10412
10413 return bp->link_vars.link_up;
10414}
10415
10416static int bnx2x_get_eeprom_len(struct net_device *dev)
10417{
10418 struct bnx2x *bp = netdev_priv(dev);
10419
10420 return bp->common.flash_size;
10421}
10422
10423static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10424{
10425 int port = BP_PORT(bp);
10426 int count, i;
10427 u32 val = 0;
10428
10429 /* adjust timeout for emulation/FPGA */
10430 count = NVRAM_TIMEOUT_COUNT;
10431 if (CHIP_REV_IS_SLOW(bp))
10432 count *= 100;
10433
10434 /* request access to nvram interface */
10435 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10436 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10437
10438 for (i = 0; i < count*10; i++) {
10439 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10440 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10441 break;
10442
10443 udelay(5);
10444 }
10445
10446 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10447 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10448 return -EBUSY;
10449 }
10450
10451 return 0;
10452}
10453
10454static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10455{
10456 int port = BP_PORT(bp);
10457 int count, i;
10458 u32 val = 0;
10459
10460 /* adjust timeout for emulation/FPGA */
10461 count = NVRAM_TIMEOUT_COUNT;
10462 if (CHIP_REV_IS_SLOW(bp))
10463 count *= 100;
10464
10465 /* relinquish nvram interface */
10466 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10467 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10468
10469 for (i = 0; i < count*10; i++) {
10470 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10471 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10472 break;
10473
10474 udelay(5);
10475 }
10476
10477 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10478 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
10479 return -EBUSY;
10480 }
10481
10482 return 0;
10483}
10484
10485static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10486{
10487 u32 val;
10488
10489 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10490
10491 /* enable both bits, even on read */
10492 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10493 (val | MCPR_NVM_ACCESS_ENABLE_EN |
10494 MCPR_NVM_ACCESS_ENABLE_WR_EN));
10495}
10496
10497static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10498{
10499 u32 val;
10500
10501 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10502
10503 /* disable both bits, even after read */
10504 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10505 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10506 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10507}
10508
10509static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10510 u32 cmd_flags)
10511{
10512 int count, i, rc;
10513 u32 val;
10514
10515 /* build the command word */
10516 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10517
10518 /* need to clear DONE bit separately */
10519 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10520
10521 /* address of the NVRAM to read from */
10522 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10523 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10524
10525 /* issue a read command */
10526 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10527
10528 /* adjust timeout for emulation/FPGA */
10529 count = NVRAM_TIMEOUT_COUNT;
10530 if (CHIP_REV_IS_SLOW(bp))
10531 count *= 100;
10532
10533 /* wait for completion */
10534 *ret_val = 0;
10535 rc = -EBUSY;
10536 for (i = 0; i < count; i++) {
10537 udelay(5);
10538 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10539
10540 if (val & MCPR_NVM_COMMAND_DONE) {
10541 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10542 			/* we read nvram data in cpu order,
10543 			 * but ethtool sees it as an array of bytes;
10544 			 * converting to big-endian does the work */
10545 *ret_val = cpu_to_be32(val);
10546 rc = 0;
10547 break;
10548 }
10549 }
10550
10551 return rc;
10552}
10553
10554static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10555 int buf_size)
10556{
10557 int rc;
10558 u32 cmd_flags;
10559 __be32 val;
10560
10561 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10562 DP(BNX2X_MSG_NVM,
10563 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
10564 offset, buf_size);
10565 return -EINVAL;
10566 }
10567
10568 if (offset + buf_size > bp->common.flash_size) {
10569 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10570 " buf_size (0x%x) > flash_size (0x%x)\n",
10571 offset, buf_size, bp->common.flash_size);
10572 return -EINVAL;
10573 }
10574
10575 /* request access to nvram interface */
10576 rc = bnx2x_acquire_nvram_lock(bp);
10577 if (rc)
10578 return rc;
10579
10580 /* enable access to nvram interface */
10581 bnx2x_enable_nvram_access(bp);
10582
10583 /* read the first word(s) */
10584 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10585 while ((buf_size > sizeof(u32)) && (rc == 0)) {
10586 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10587 memcpy(ret_buf, &val, 4);
10588
10589 /* advance to the next dword */
10590 offset += sizeof(u32);
10591 ret_buf += sizeof(u32);
10592 buf_size -= sizeof(u32);
10593 cmd_flags = 0;
10594 }
10595
10596 if (rc == 0) {
10597 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10598 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10599 memcpy(ret_buf, &val, 4);
10600 }
10601
10602 /* disable access to nvram interface */
10603 bnx2x_disable_nvram_access(bp);
10604 bnx2x_release_nvram_lock(bp);
10605
10606 return rc;
10607}
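/* The FIRST/LAST command flags bracket a burst on the NVRAM interface:
 * e.g. a 12-byte read at a dword-aligned offset issues three dword reads
 * with cmd_flags FIRST, 0 and LAST respectively.  Both offset and length
 * must be dword aligned and non-zero, otherwise the function bails out
 * with -EINVAL before touching the hardware.
 */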
10608
10609static int bnx2x_get_eeprom(struct net_device *dev,
10610 struct ethtool_eeprom *eeprom, u8 *eebuf)
10611{
10612 struct bnx2x *bp = netdev_priv(dev);
10613 int rc;
10614
10615 if (!netif_running(dev))
10616 return -EAGAIN;
10617
10618 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10619 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10620 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10621 eeprom->len, eeprom->len);
10622
10623 /* parameters already validated in ethtool_get_eeprom */
10624
10625 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10626
10627 return rc;
10628}
10629
10630static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10631 u32 cmd_flags)
10632{
10633 int count, i, rc;
10634
10635 /* build the command word */
10636 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10637
10638 /* need to clear DONE bit separately */
10639 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10640
10641 /* write the data */
10642 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10643
10644 /* address of the NVRAM to write to */
10645 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10646 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10647
10648 /* issue the write command */
10649 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10650
10651 /* adjust timeout for emulation/FPGA */
10652 count = NVRAM_TIMEOUT_COUNT;
10653 if (CHIP_REV_IS_SLOW(bp))
10654 count *= 100;
10655
10656 /* wait for completion */
10657 rc = -EBUSY;
10658 for (i = 0; i < count; i++) {
10659 udelay(5);
10660 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10661 if (val & MCPR_NVM_COMMAND_DONE) {
10662 rc = 0;
10663 break;
10664 }
10665 }
10666
10667 return rc;
10668}
10669
10670#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
10671
10672static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10673 int buf_size)
10674{
10675 int rc;
10676 u32 cmd_flags;
10677 u32 align_offset;
10678 __be32 val;
10679
10680 if (offset + buf_size > bp->common.flash_size) {
10681 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10682 " buf_size (0x%x) > flash_size (0x%x)\n",
10683 offset, buf_size, bp->common.flash_size);
10684 return -EINVAL;
10685 }
10686
10687 /* request access to nvram interface */
10688 rc = bnx2x_acquire_nvram_lock(bp);
10689 if (rc)
10690 return rc;
10691
10692 /* enable access to nvram interface */
10693 bnx2x_enable_nvram_access(bp);
10694
10695 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10696 align_offset = (offset & ~0x03);
10697 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10698
10699 if (rc == 0) {
10700 val &= ~(0xff << BYTE_OFFSET(offset));
10701 val |= (*data_buf << BYTE_OFFSET(offset));
10702
10703 		/* nvram data is returned as an array of bytes;
10704 		 * convert it back to cpu order */
10705 val = be32_to_cpu(val);
10706
10707 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10708 cmd_flags);
10709 }
10710
10711 /* disable access to nvram interface */
10712 bnx2x_disable_nvram_access(bp);
10713 bnx2x_release_nvram_lock(bp);
10714
10715 return rc;
10716}
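/* Single-byte writes (the buf_size == 1 case ethtool can generate) are
 * done as a read-modify-write of the containing dword: the aligned dword
 * is read back, the target byte selected via BYTE_OFFSET()
 * (8 * (offset & 3)) is replaced, and the dword is written out again.
 * For example, offset 0x102 maps to aligned offset 0x100 with a byte
 * shift of 16.
 */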
10717
10718static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10719 int buf_size)
10720{
10721 int rc;
10722 u32 cmd_flags;
10723 u32 val;
10724 u32 written_so_far;
10725
10726 if (buf_size == 1) /* ethtool */
10727 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10728
10729 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10730 DP(BNX2X_MSG_NVM,
10731 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
10732 offset, buf_size);
10733 return -EINVAL;
10734 }
10735
10736 if (offset + buf_size > bp->common.flash_size) {
10737 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10738 " buf_size (0x%x) > flash_size (0x%x)\n",
10739 offset, buf_size, bp->common.flash_size);
10740 return -EINVAL;
10741 }
10742
10743 /* request access to nvram interface */
10744 rc = bnx2x_acquire_nvram_lock(bp);
10745 if (rc)
10746 return rc;
10747
10748 /* enable access to nvram interface */
10749 bnx2x_enable_nvram_access(bp);
10750
10751 written_so_far = 0;
10752 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10753 while ((written_so_far < buf_size) && (rc == 0)) {
10754 if (written_so_far == (buf_size - sizeof(u32)))
10755 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10756 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10757 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10758 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10759 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10760
10761 memcpy(&val, data_buf, 4);
10762
10763 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10764
10765 /* advance to the next dword */
10766 offset += sizeof(u32);
10767 data_buf += sizeof(u32);
10768 written_so_far += sizeof(u32);
10769 cmd_flags = 0;
10770 }
10771
10772 /* disable access to nvram interface */
10773 bnx2x_disable_nvram_access(bp);
10774 bnx2x_release_nvram_lock(bp);
10775
10776 return rc;
10777}
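/* Larger writes are streamed dword by dword; LAST is raised either on the
 * final dword of the buffer or when the current dword ends an NVRAM page,
 * and FIRST is raised again at the start of the next page.  With a
 * (typical) 256-byte NVRAM_PAGE_SIZE, a 512-byte write therefore becomes
 * two page-sized bursts.
 */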
10778
10779static int bnx2x_set_eeprom(struct net_device *dev,
10780 struct ethtool_eeprom *eeprom, u8 *eebuf)
10781{
10782 struct bnx2x *bp = netdev_priv(dev);
10783 int port = BP_PORT(bp);
10784 int rc = 0;
10785
10786 if (!netif_running(dev))
10787 return -EAGAIN;
10788
10789 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10790 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10791 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10792 eeprom->len, eeprom->len);
10793
10794 /* parameters already validated in ethtool_set_eeprom */
10795
10796 /* PHY eeprom can be accessed only by the PMF */
10797 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10798 !bp->port.pmf)
10799 return -EINVAL;
10800
10801 if (eeprom->magic == 0x50485950) {
10802 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10803 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10804
10805 bnx2x_acquire_phy_lock(bp);
10806 rc |= bnx2x_link_reset(&bp->link_params,
10807 &bp->link_vars, 0);
10808 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10809 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10810 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10811 MISC_REGISTERS_GPIO_HIGH, port);
10812 bnx2x_release_phy_lock(bp);
10813 bnx2x_link_report(bp);
10814
10815 } else if (eeprom->magic == 0x50485952) {
10816 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10817 if (bp->state == BNX2X_STATE_OPEN) {
10818 bnx2x_acquire_phy_lock(bp);
10819 rc |= bnx2x_link_reset(&bp->link_params,
10820 &bp->link_vars, 1);
10821
10822 rc |= bnx2x_phy_init(&bp->link_params,
10823 &bp->link_vars);
10824 bnx2x_release_phy_lock(bp);
10825 bnx2x_calc_fc_adv(bp);
10826 }
10827 } else if (eeprom->magic == 0x53985943) {
10828 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
10829 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10830 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10831 u8 ext_phy_addr =
10832 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10833
10834 /* DSP Remove Download Mode */
10835 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10836 MISC_REGISTERS_GPIO_LOW, port);
10837
10838 bnx2x_acquire_phy_lock(bp);
10839
10840 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10841
10842 /* wait 0.5 sec to allow it to run */
10843 msleep(500);
10844 bnx2x_ext_phy_hw_reset(bp, port);
10845 msleep(500);
10846 bnx2x_release_phy_lock(bp);
10847 }
10848 } else
10849 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10850
10851 return rc;
10852}
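/* The magic values above let a userspace PHY firmware upgrade tool drive
 * the SFX7101 update through the regular ethtool EEPROM write path:
 * 'PHYP' (0x50485950) quiesces the link and raises GPIO0 before the
 * upgrade, 'PHYR' (0x50485952) re-initializes the link afterwards, and
 * 0x53985943 drops GPIO0 and resets the PHY DSP once the download is
 * done.  Magics in the 0x504859xx range are honoured only on the PMF;
 * any other magic falls through to a plain NVRAM flash write.
 */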
10853
10854static int bnx2x_get_coalesce(struct net_device *dev,
10855 struct ethtool_coalesce *coal)
10856{
10857 struct bnx2x *bp = netdev_priv(dev);
10858
10859 memset(coal, 0, sizeof(struct ethtool_coalesce));
10860
10861 coal->rx_coalesce_usecs = bp->rx_ticks;
10862 coal->tx_coalesce_usecs = bp->tx_ticks;
10863
10864 return 0;
10865}
10866
10867static int bnx2x_set_coalesce(struct net_device *dev,
10868 struct ethtool_coalesce *coal)
10869{
10870 struct bnx2x *bp = netdev_priv(dev);
10871
10872 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10873 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10874 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
10875
10876 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10877 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10878 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
10879
10880 if (netif_running(dev))
10881 bnx2x_update_coalesce(bp);
10882
10883 return 0;
10884}
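/* Example (interface name illustrative):
 *
 *   ethtool -C eth0 rx-usecs 25 tx-usecs 50
 *
 * maps straight onto bp->rx_ticks/bp->tx_ticks, clamped to
 * BNX2X_MAX_COALESCE_TOUT, and takes effect immediately through
 * bnx2x_update_coalesce() when the interface is up.
 */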
10885
10886static void bnx2x_get_ringparam(struct net_device *dev,
10887 struct ethtool_ringparam *ering)
10888{
10889 struct bnx2x *bp = netdev_priv(dev);
10890
10891 ering->rx_max_pending = MAX_RX_AVAIL;
10892 ering->rx_mini_max_pending = 0;
10893 ering->rx_jumbo_max_pending = 0;
10894
10895 ering->rx_pending = bp->rx_ring_size;
10896 ering->rx_mini_pending = 0;
10897 ering->rx_jumbo_pending = 0;
10898
10899 ering->tx_max_pending = MAX_TX_AVAIL;
10900 ering->tx_pending = bp->tx_ring_size;
10901}
10902
10903static int bnx2x_set_ringparam(struct net_device *dev,
10904 struct ethtool_ringparam *ering)
10905{
10906 struct bnx2x *bp = netdev_priv(dev);
10907 int rc = 0;
10908
10909 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10910 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10911 return -EAGAIN;
10912 }
10913
10914 if ((ering->rx_pending > MAX_RX_AVAIL) ||
10915 (ering->tx_pending > MAX_TX_AVAIL) ||
10916 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10917 return -EINVAL;
10918
10919 bp->rx_ring_size = ering->rx_pending;
10920 bp->tx_ring_size = ering->tx_pending;
10921
10922 if (netif_running(dev)) {
10923 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10924 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10925 }
10926
10927 return rc;
10928}
10929
10930static void bnx2x_get_pauseparam(struct net_device *dev,
10931 struct ethtool_pauseparam *epause)
10932{
10933 struct bnx2x *bp = netdev_priv(dev);
10934
10935 epause->autoneg = (bp->link_params.req_flow_ctrl ==
10936 BNX2X_FLOW_CTRL_AUTO) &&
10937 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10938
10939 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10940 BNX2X_FLOW_CTRL_RX);
10941 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10942 BNX2X_FLOW_CTRL_TX);
10943
10944 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10945 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10946 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10947}
10948
10949static int bnx2x_set_pauseparam(struct net_device *dev,
10950 struct ethtool_pauseparam *epause)
10951{
10952 struct bnx2x *bp = netdev_priv(dev);
10953
10954 if (IS_E1HMF(bp))
10955 return 0;
10956
10957 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10958 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10959 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10960
10961 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10962
10963 if (epause->rx_pause)
10964 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10965
10966 if (epause->tx_pause)
10967 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10968
10969 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10970 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10971
10972 if (epause->autoneg) {
10973 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10974 DP(NETIF_MSG_LINK, "autoneg not supported\n");
10975 return -EINVAL;
10976 }
10977
10978 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10979 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10980 }
10981
10982 DP(NETIF_MSG_LINK,
10983 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10984
10985 if (netif_running(dev)) {
10986 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10987 bnx2x_link_set(bp);
10988 }
10989
10990 return 0;
10991}
10992
10993static int bnx2x_set_flags(struct net_device *dev, u32 data)
10994{
10995 struct bnx2x *bp = netdev_priv(dev);
10996 int changed = 0;
10997 int rc = 0;
10998
10999 if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
11000 return -EINVAL;
11001
11002 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11003 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11004 return -EAGAIN;
11005 }
11006
11007 /* TPA requires Rx CSUM offloading */
11008 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
11009 if (!disable_tpa) {
11010 if (!(dev->features & NETIF_F_LRO)) {
11011 dev->features |= NETIF_F_LRO;
11012 bp->flags |= TPA_ENABLE_FLAG;
11013 changed = 1;
11014 }
11015 } else
11016 rc = -EINVAL;
11017 } else if (dev->features & NETIF_F_LRO) {
11018 dev->features &= ~NETIF_F_LRO;
11019 bp->flags &= ~TPA_ENABLE_FLAG;
11020 changed = 1;
11021 }
11022
11023 if (data & ETH_FLAG_RXHASH)
11024 dev->features |= NETIF_F_RXHASH;
11025 else
11026 dev->features &= ~NETIF_F_RXHASH;
11027
11028 if (changed && netif_running(dev)) {
11029 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11030 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11031 }
11032
11033 return rc;
11034}
11035
11036static u32 bnx2x_get_rx_csum(struct net_device *dev)
11037{
11038 struct bnx2x *bp = netdev_priv(dev);
11039
11040 return bp->rx_csum;
11041}
11042
11043static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
11044{
11045 struct bnx2x *bp = netdev_priv(dev);
11046 int rc = 0;
11047
11048 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11049 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11050 return -EAGAIN;
11051 }
11052
11053 bp->rx_csum = data;
11054
11055 	/* Disable TPA when Rx CSUM is disabled; otherwise all
11056 	   TPA'ed packets would be discarded due to a wrong TCP CSUM */
11057 if (!data) {
11058 u32 flags = ethtool_op_get_flags(dev);
11059
11060 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11061 }
11062
11063 return rc;
11064}
11065
11066static int bnx2x_set_tso(struct net_device *dev, u32 data)
11067{
11068 if (data) {
11069 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11070 dev->features |= NETIF_F_TSO6;
11071 } else {
11072 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
11073 dev->features &= ~NETIF_F_TSO6;
11074 }
11075
11076 return 0;
11077}
11078
11079static const struct {
11080 char string[ETH_GSTRING_LEN];
11081} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
11082 { "register_test (offline)" },
11083 { "memory_test (offline)" },
11084 { "loopback_test (offline)" },
11085 { "nvram_test (online)" },
11086 { "interrupt_test (online)" },
11087 { "link_test (online)" },
11088 { "idle check (online)" }
11089};
11090
11091static int bnx2x_test_registers(struct bnx2x *bp)
11092{
11093 int idx, i, rc = -ENODEV;
11094 u32 wr_val = 0;
11095 int port = BP_PORT(bp);
11096 static const struct {
11097 u32 offset0;
11098 u32 offset1;
11099 u32 mask;
11100 } reg_tbl[] = {
11101/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
11102 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
11103 { HC_REG_AGG_INT_0, 4, 0x000003ff },
11104 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
11105 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
11106 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
11107 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
11108 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11109 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
11110 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11111/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
11112 { QM_REG_CONNNUM_0, 4, 0x000fffff },
11113 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
11114 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
11115 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
11116 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11117 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
11118 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
11119 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
11120 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
11121/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
11122 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
11123 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
11124 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
11125 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
11126 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
11127 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
11128 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
11129 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
11130 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
11131/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
11132 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
11133 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
11134 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11135 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
11136 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11137 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
11138
11139 { 0xffffffff, 0, 0x00000000 }
11140 };
11141
11142 if (!netif_running(bp->dev))
11143 return rc;
11144
11145 /* Repeat the test twice:
11146 First by writing 0x00000000, second by writing 0xffffffff */
11147 for (idx = 0; idx < 2; idx++) {
11148
11149 switch (idx) {
11150 case 0:
11151 wr_val = 0;
11152 break;
11153 case 1:
11154 wr_val = 0xffffffff;
11155 break;
11156 }
11157
11158 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11159 u32 offset, mask, save_val, val;
11160
11161 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11162 mask = reg_tbl[i].mask;
11163
11164 save_val = REG_RD(bp, offset);
11165
11166 REG_WR(bp, offset, (wr_val & mask));
11167 val = REG_RD(bp, offset);
11168
11169 /* Restore the original register's value */
11170 REG_WR(bp, offset, save_val);
11171
11172 /* verify value is as expected */
11173 if ((val & mask) != (wr_val & mask)) {
11174 DP(NETIF_MSG_PROBE,
11175 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11176 offset, val, wr_val, mask);
11177 goto test_reg_exit;
11178 }
11179 }
11180 }
11181
11182 rc = 0;
11183
11184test_reg_exit:
11185 return rc;
11186}
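/* Each reg_tbl entry is { offset on port 0, per-port stride, writable
 * mask }: the register actually exercised is offset0 + port * offset1,
 * e.g. HC_REG_AGG_INT_0 + 4 on port 1.  The test writes 0 and then
 * 0xffffffff under the mask, reads the value back and restores the
 * original contents, which is why it only runs as part of the offline
 * (diagnostic-load) self test.
 */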
11187
11188static int bnx2x_test_memory(struct bnx2x *bp)
11189{
11190 int i, j, rc = -ENODEV;
11191 u32 val;
11192 static const struct {
11193 u32 offset;
11194 int size;
11195 } mem_tbl[] = {
11196 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
11197 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11198 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
11199 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
11200 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
11201 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
11202 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
11203
11204 { 0xffffffff, 0 }
11205 };
11206 static const struct {
11207 char *name;
11208 u32 offset;
11209 u32 e1_mask;
11210 u32 e1h_mask;
11211 } prty_tbl[] = {
11212 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
11213 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
11214 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
11215 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
11216 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
11217 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
11218
11219 { NULL, 0xffffffff, 0, 0 }
11220 };
11221
11222 if (!netif_running(bp->dev))
11223 return rc;
11224
11225 /* Go through all the memories */
11226 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11227 for (j = 0; j < mem_tbl[i].size; j++)
11228 REG_RD(bp, mem_tbl[i].offset + j*4);
11229
11230 /* Check the parity status */
11231 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11232 val = REG_RD(bp, prty_tbl[i].offset);
11233 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11234 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11235 DP(NETIF_MSG_HW,
11236 "%s is 0x%x\n", prty_tbl[i].name, val);
11237 goto test_mem_exit;
11238 }
11239 }
11240
11241 rc = 0;
11242
11243test_mem_exit:
11244 return rc;
11245}
11246
11247static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11248{
11249 int cnt = 1000;
11250
11251 if (link_up)
11252 while (bnx2x_link_test(bp) && cnt--)
11253 msleep(10);
11254}
11255
11256static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11257{
11258 unsigned int pkt_size, num_pkts, i;
11259 struct sk_buff *skb;
11260 unsigned char *packet;
11261 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11262 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11263 u16 tx_start_idx, tx_idx;
11264 u16 rx_start_idx, rx_idx;
11265 u16 pkt_prod, bd_prod;
11266 struct sw_tx_bd *tx_buf;
11267 struct eth_tx_start_bd *tx_start_bd;
11268 struct eth_tx_parse_bd *pbd = NULL;
11269 dma_addr_t mapping;
11270 union eth_rx_cqe *cqe;
11271 u8 cqe_fp_flags;
11272 struct sw_rx_bd *rx_buf;
11273 u16 len;
11274 int rc = -ENODEV;
11275
11276 /* check the loopback mode */
11277 switch (loopback_mode) {
11278 case BNX2X_PHY_LOOPBACK:
11279 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11280 return -EINVAL;
11281 break;
11282 case BNX2X_MAC_LOOPBACK:
11283 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11284 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11285 break;
11286 default:
11287 return -EINVAL;
11288 }
11289
11290 /* prepare the loopback packet */
11291 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11292 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11293 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11294 if (!skb) {
11295 rc = -ENOMEM;
11296 goto test_loopback_exit;
11297 }
11298 packet = skb_put(skb, pkt_size);
11299 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11300 memset(packet + ETH_ALEN, 0, ETH_ALEN);
11301 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
11302 for (i = ETH_HLEN; i < pkt_size; i++)
11303 packet[i] = (unsigned char) (i & 0xff);
11304
11305 /* send the loopback packet */
11306 num_pkts = 0;
11307 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11308 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11309
11310 pkt_prod = fp_tx->tx_pkt_prod++;
11311 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11312 tx_buf->first_bd = fp_tx->tx_bd_prod;
11313 tx_buf->skb = skb;
11314 tx_buf->flags = 0;
11315
11316 bd_prod = TX_BD(fp_tx->tx_bd_prod);
11317 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11318 mapping = dma_map_single(&bp->pdev->dev, skb->data,
11319 skb_headlen(skb), DMA_TO_DEVICE);
11320 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11321 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11322 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11323 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11324 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11325 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11326 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11327 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11328
11329 /* turn on parsing and get a BD */
11330 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11331 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11332
11333 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11334
11335 wmb();
11336
11337 fp_tx->tx_db.data.prod += 2;
11338 barrier();
11339 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11340
11341 mmiowb();
11342
11343 num_pkts++;
11344 fp_tx->tx_bd_prod += 2; /* start + pbd */
11345
11346 udelay(100);
11347
11348 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11349 if (tx_idx != tx_start_idx + num_pkts)
11350 goto test_loopback_exit;
11351
11352 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11353 if (rx_idx != rx_start_idx + num_pkts)
11354 goto test_loopback_exit;
11355
11356 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11357 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
11358 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11359 goto test_loopback_rx_exit;
11360
11361 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11362 if (len != pkt_size)
11363 goto test_loopback_rx_exit;
11364
11365 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11366 skb = rx_buf->skb;
11367 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11368 for (i = ETH_HLEN; i < pkt_size; i++)
11369 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11370 goto test_loopback_rx_exit;
11371
11372 rc = 0;
11373
11374test_loopback_rx_exit:
11375
11376 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11377 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11378 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11379 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11380
11381 /* Update producers */
11382 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11383 fp_rx->rx_sge_prod);
11384
11385test_loopback_exit:
11386 bp->link_params.loopback_mode = LOOPBACK_NONE;
11387
11388 return rc;
11389}
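/* The loopback frame built above is addressed to the NIC's own MAC with a
 * zeroed source address, 0x77 filler for the rest of the Ethernet header
 * and a counting byte pattern (i & 0xff) as payload; the Rx side then
 * checks the CQE flags, the packet length and that same pattern to decide
 * whether the PHY or MAC loopback actually worked.
 */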
11390
11391static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11392{
11393 int rc = 0, res;
11394
11395 if (BP_NOMCP(bp))
11396 return rc;
11397
11398 if (!netif_running(bp->dev))
11399 return BNX2X_LOOPBACK_FAILED;
11400
11401 bnx2x_netif_stop(bp, 1);
11402 bnx2x_acquire_phy_lock(bp);
11403
11404 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11405 if (res) {
11406 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
11407 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11408 }
11409
11410 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11411 if (res) {
11412 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
11413 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11414 }
11415
11416 bnx2x_release_phy_lock(bp);
11417 bnx2x_netif_start(bp);
11418
11419 return rc;
11420}
11421
11422#define CRC32_RESIDUAL 0xdebb20e3
11423
11424static int bnx2x_test_nvram(struct bnx2x *bp)
11425{
11426 static const struct {
11427 int offset;
11428 int size;
11429 } nvram_tbl[] = {
11430 { 0, 0x14 }, /* bootstrap */
11431 { 0x14, 0xec }, /* dir */
11432 { 0x100, 0x350 }, /* manuf_info */
11433 { 0x450, 0xf0 }, /* feature_info */
11434 { 0x640, 0x64 }, /* upgrade_key_info */
11435 { 0x6a4, 0x64 },
11436 { 0x708, 0x70 }, /* manuf_key_info */
11437 { 0x778, 0x70 },
11438 { 0, 0 }
11439 };
11440 __be32 buf[0x350 / 4];
11441 u8 *data = (u8 *)buf;
11442 int i, rc;
11443 u32 magic, crc;
11444
11445 if (BP_NOMCP(bp))
11446 return 0;
11447
11448 rc = bnx2x_nvram_read(bp, 0, data, 4);
11449 if (rc) {
11450 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
11451 goto test_nvram_exit;
11452 }
11453
11454 magic = be32_to_cpu(buf[0]);
11455 if (magic != 0x669955aa) {
11456 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11457 rc = -ENODEV;
11458 goto test_nvram_exit;
11459 }
11460
11461 for (i = 0; nvram_tbl[i].size; i++) {
11462
11463 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11464 nvram_tbl[i].size);
11465 if (rc) {
11466 DP(NETIF_MSG_PROBE,
11467 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11468 goto test_nvram_exit;
11469 }
11470
11471 crc = ether_crc_le(nvram_tbl[i].size, data);
11472 if (crc != CRC32_RESIDUAL) {
11473 DP(NETIF_MSG_PROBE,
11474 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11475 rc = -ENODEV;
11476 goto test_nvram_exit;
11477 }
11478 }
11479
11480test_nvram_exit:
11481 return rc;
11482}
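/* Standard CRC-32 residue check: each NVRAM block listed in nvram_tbl is
 * expected to carry its own little-endian CRC-32, so running
 * ether_crc_le() over the whole block (data plus stored CRC) must yield
 * the fixed residual 0xdebb20e3 whenever the block is intact.
 */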
11483
11484static int bnx2x_test_intr(struct bnx2x *bp)
11485{
11486 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11487 int i, rc;
11488
11489 if (!netif_running(bp->dev))
11490 return -ENODEV;
11491
11492 config->hdr.length = 0;
11493 if (CHIP_IS_E1(bp))
11494 /* use last unicast entries */
11495 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11496 else
11497 config->hdr.offset = BP_FUNC(bp);
11498 config->hdr.client_id = bp->fp->cl_id;
11499 config->hdr.reserved1 = 0;
11500
11501 bp->set_mac_pending++;
11502 smp_wmb();
11503 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11504 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11505 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11506 if (rc == 0) {
11507 for (i = 0; i < 10; i++) {
11508 if (!bp->set_mac_pending)
11509 break;
11510 smp_rmb();
11511 msleep_interruptible(10);
11512 }
11513 if (i == 10)
11514 rc = -ENODEV;
11515 }
11516
11517 return rc;
11518}
11519
11520static void bnx2x_self_test(struct net_device *dev,
11521 struct ethtool_test *etest, u64 *buf)
11522{
11523 struct bnx2x *bp = netdev_priv(dev);
11524
11525 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11526 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11527 etest->flags |= ETH_TEST_FL_FAILED;
11528 return;
11529 }
11530
11531 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11532
11533 if (!netif_running(dev))
11534 return;
11535
11536 /* offline tests are not supported in MF mode */
11537 if (IS_E1HMF(bp))
11538 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11539
11540 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11541 int port = BP_PORT(bp);
11542 u32 val;
11543 u8 link_up;
11544
11545 /* save current value of input enable for TX port IF */
11546 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11547 /* disable input for TX port IF */
11548 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11549
11550 link_up = (bnx2x_link_test(bp) == 0);
11551 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11552 bnx2x_nic_load(bp, LOAD_DIAG);
11553 /* wait until link state is restored */
11554 bnx2x_wait_for_link(bp, link_up);
11555
11556 if (bnx2x_test_registers(bp) != 0) {
11557 buf[0] = 1;
11558 etest->flags |= ETH_TEST_FL_FAILED;
11559 }
11560 if (bnx2x_test_memory(bp) != 0) {
11561 buf[1] = 1;
11562 etest->flags |= ETH_TEST_FL_FAILED;
11563 }
11564 buf[2] = bnx2x_test_loopback(bp, link_up);
11565 if (buf[2] != 0)
11566 etest->flags |= ETH_TEST_FL_FAILED;
11567
11568 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11569
11570 /* restore input for TX port IF */
11571 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11572
11573 bnx2x_nic_load(bp, LOAD_NORMAL);
11574 /* wait until link state is restored */
11575 bnx2x_wait_for_link(bp, link_up);
11576 }
11577 if (bnx2x_test_nvram(bp) != 0) {
11578 buf[3] = 1;
11579 etest->flags |= ETH_TEST_FL_FAILED;
11580 }
11581 if (bnx2x_test_intr(bp) != 0) {
11582 buf[4] = 1;
11583 etest->flags |= ETH_TEST_FL_FAILED;
11584 }
11585 if (bp->port.pmf)
11586 if (bnx2x_link_test(bp) != 0) {
11587 buf[5] = 1;
11588 etest->flags |= ETH_TEST_FL_FAILED;
11589 }
11590
11591#ifdef BNX2X_EXTRA_DEBUG
11592 bnx2x_panic_dump(bp);
11593#endif
11594}
11595
11596static const struct {
11597 long offset;
11598 int size;
11599 u8 string[ETH_GSTRING_LEN];
11600} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11601/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11602 { Q_STATS_OFFSET32(error_bytes_received_hi),
11603 8, "[%d]: rx_error_bytes" },
11604 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11605 8, "[%d]: rx_ucast_packets" },
11606 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11607 8, "[%d]: rx_mcast_packets" },
11608 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11609 8, "[%d]: rx_bcast_packets" },
11610 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11611 { Q_STATS_OFFSET32(rx_err_discard_pkt),
11612 4, "[%d]: rx_phy_ip_err_discards"},
11613 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11614 4, "[%d]: rx_skb_alloc_discard" },
11615 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11616
11617/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11618 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11619 8, "[%d]: tx_ucast_packets" },
11620 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11621 8, "[%d]: tx_mcast_packets" },
11622 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11623 8, "[%d]: tx_bcast_packets" }
11624};
11625
11626static const struct {
11627 long offset;
11628 int size;
11629 u32 flags;
11630#define STATS_FLAGS_PORT 1
11631#define STATS_FLAGS_FUNC 2
11632#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11633 u8 string[ETH_GSTRING_LEN];
11634} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11635/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11636 8, STATS_FLAGS_BOTH, "rx_bytes" },
11637 { STATS_OFFSET32(error_bytes_received_hi),
11638 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11639 { STATS_OFFSET32(total_unicast_packets_received_hi),
11640 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11641 { STATS_OFFSET32(total_multicast_packets_received_hi),
11642 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11643 { STATS_OFFSET32(total_broadcast_packets_received_hi),
11644 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11645 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11646 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11647 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11648 8, STATS_FLAGS_PORT, "rx_align_errors" },
11649 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11650 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11651 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11652 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11653/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11654 8, STATS_FLAGS_PORT, "rx_fragments" },
11655 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11656 8, STATS_FLAGS_PORT, "rx_jabbers" },
11657 { STATS_OFFSET32(no_buff_discard_hi),
11658 8, STATS_FLAGS_BOTH, "rx_discards" },
11659 { STATS_OFFSET32(mac_filter_discard),
11660 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11661 { STATS_OFFSET32(xxoverflow_discard),
11662 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11663 { STATS_OFFSET32(brb_drop_hi),
11664 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11665 { STATS_OFFSET32(brb_truncate_hi),
11666 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11667 { STATS_OFFSET32(pause_frames_received_hi),
11668 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11669 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11670 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11671 { STATS_OFFSET32(nig_timer_max),
11672 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11673/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11674 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11675 { STATS_OFFSET32(rx_skb_alloc_failed),
11676 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11677 { STATS_OFFSET32(hw_csum_err),
11678 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11679
11680 { STATS_OFFSET32(total_bytes_transmitted_hi),
11681 8, STATS_FLAGS_BOTH, "tx_bytes" },
11682 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11683 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11684 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11685 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11686 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11687 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11688 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11689 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
11690 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11691 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11692 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11693 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11694/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11695 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11696 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11697 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11698 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11699 8, STATS_FLAGS_PORT, "tx_deferred" },
11700 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11701 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11702 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11703 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11704 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11705 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11706 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11707 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11708 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11709 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11710 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11711 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11712 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11713 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11714/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11715 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11716 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11717 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11718 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
11719 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11720 { STATS_OFFSET32(pause_frames_sent_hi),
11721 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11722};
11723
11724#define IS_PORT_STAT(i) \
11725 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11726#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11727#define IS_E1HMF_MODE_STAT(bp) \
11728 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
11729
11730static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11731{
11732 struct bnx2x *bp = netdev_priv(dev);
11733 int i, num_stats;
11734
11735 switch (stringset) {
11736 case ETH_SS_STATS:
11737 if (is_multi(bp)) {
11738 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11739 if (!IS_E1HMF_MODE_STAT(bp))
11740 num_stats += BNX2X_NUM_STATS;
11741 } else {
11742 if (IS_E1HMF_MODE_STAT(bp)) {
11743 num_stats = 0;
11744 for (i = 0; i < BNX2X_NUM_STATS; i++)
11745 if (IS_FUNC_STAT(i))
11746 num_stats++;
11747 } else
11748 num_stats = BNX2X_NUM_STATS;
11749 }
11750 return num_stats;
11751
11752 case ETH_SS_TEST:
11753 return BNX2X_NUM_TESTS;
11754
11755 default:
11756 return -EINVAL;
11757 }
11758}
11759
11760static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11761{
11762 struct bnx2x *bp = netdev_priv(dev);
11763 int i, j, k;
11764
11765 switch (stringset) {
11766 case ETH_SS_STATS:
11767 if (is_multi(bp)) {
11768 k = 0;
11769 for_each_queue(bp, i) {
11770 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11771 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11772 bnx2x_q_stats_arr[j].string, i);
11773 k += BNX2X_NUM_Q_STATS;
11774 }
11775 if (IS_E1HMF_MODE_STAT(bp))
11776 break;
11777 for (j = 0; j < BNX2X_NUM_STATS; j++)
11778 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11779 bnx2x_stats_arr[j].string);
11780 } else {
11781 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11782 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11783 continue;
11784 strcpy(buf + j*ETH_GSTRING_LEN,
11785 bnx2x_stats_arr[i].string);
11786 j++;
11787 }
11788 }
11789 break;
11790
11791 case ETH_SS_TEST:
11792 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11793 break;
11794 }
11795}
11796
11797static void bnx2x_get_ethtool_stats(struct net_device *dev,
11798 struct ethtool_stats *stats, u64 *buf)
11799{
11800 struct bnx2x *bp = netdev_priv(dev);
11801 u32 *hw_stats, *offset;
11802 int i, j, k;
11803
11804 if (is_multi(bp)) {
11805 k = 0;
11806 for_each_queue(bp, i) {
11807 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11808 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11809 if (bnx2x_q_stats_arr[j].size == 0) {
11810 /* skip this counter */
11811 buf[k + j] = 0;
11812 continue;
11813 }
11814 offset = (hw_stats +
11815 bnx2x_q_stats_arr[j].offset);
11816 if (bnx2x_q_stats_arr[j].size == 4) {
11817 /* 4-byte counter */
11818 buf[k + j] = (u64) *offset;
11819 continue;
11820 }
11821 /* 8-byte counter */
11822 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11823 }
11824 k += BNX2X_NUM_Q_STATS;
11825 }
11826 if (IS_E1HMF_MODE_STAT(bp))
11827 return;
11828 hw_stats = (u32 *)&bp->eth_stats;
11829 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11830 if (bnx2x_stats_arr[j].size == 0) {
11831 /* skip this counter */
11832 buf[k + j] = 0;
11833 continue;
11834 }
11835 offset = (hw_stats + bnx2x_stats_arr[j].offset);
11836 if (bnx2x_stats_arr[j].size == 4) {
11837 /* 4-byte counter */
11838 buf[k + j] = (u64) *offset;
11839 continue;
11840 }
11841 /* 8-byte counter */
11842 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11843 }
11844 } else {
11845 hw_stats = (u32 *)&bp->eth_stats;
11846 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11847 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11848 continue;
11849 if (bnx2x_stats_arr[i].size == 0) {
11850 /* skip this counter */
11851 buf[j] = 0;
11852 j++;
11853 continue;
11854 }
11855 offset = (hw_stats + bnx2x_stats_arr[i].offset);
11856 if (bnx2x_stats_arr[i].size == 4) {
11857 /* 4-byte counter */
11858 buf[j] = (u64) *offset;
11859 j++;
11860 continue;
11861 }
11862 /* 8-byte counter */
11863 buf[j] = HILO_U64(*offset, *(offset + 1));
11864 j++;
11865 }
11866 }
11867}
11868
11869static int bnx2x_phys_id(struct net_device *dev, u32 data)
11870{
11871 struct bnx2x *bp = netdev_priv(dev);
11872 int i;
11873
11874 if (!netif_running(dev))
11875 return 0;
11876
11877 if (!bp->port.pmf)
11878 return 0;
11879
11880 if (data == 0)
11881 data = 2;
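	/* Blink the port LED for 'data' seconds (2 by default): toggle between
	 * operational and off every 500 ms, then restore the LED according to
	 * the current link state.
	 */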
11882
11883 for (i = 0; i < (data * 2); i++) {
11884 if ((i % 2) == 0)
11885 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11886 SPEED_1000);
11887 else
11888 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11889
11890 msleep_interruptible(500);
11891 if (signal_pending(current))
11892 break;
11893 }
11894
11895 if (bp->link_vars.link_up)
11896 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11897 bp->link_vars.line_speed);
11898
11899 return 0;
11900}
11901
11902static const struct ethtool_ops bnx2x_ethtool_ops = {
11903 .get_settings = bnx2x_get_settings,
11904 .set_settings = bnx2x_set_settings,
11905 .get_drvinfo = bnx2x_get_drvinfo,
11906 .get_regs_len = bnx2x_get_regs_len,
11907 .get_regs = bnx2x_get_regs,
11908 .get_wol = bnx2x_get_wol,
11909 .set_wol = bnx2x_set_wol,
11910 .get_msglevel = bnx2x_get_msglevel,
11911 .set_msglevel = bnx2x_set_msglevel,
11912 .nway_reset = bnx2x_nway_reset,
11913 .get_link = bnx2x_get_link,
11914 .get_eeprom_len = bnx2x_get_eeprom_len,
11915 .get_eeprom = bnx2x_get_eeprom,
11916 .set_eeprom = bnx2x_set_eeprom,
11917 .get_coalesce = bnx2x_get_coalesce,
11918 .set_coalesce = bnx2x_set_coalesce,
11919 .get_ringparam = bnx2x_get_ringparam,
11920 .set_ringparam = bnx2x_set_ringparam,
11921 .get_pauseparam = bnx2x_get_pauseparam,
11922 .set_pauseparam = bnx2x_set_pauseparam,
11923 .get_rx_csum = bnx2x_get_rx_csum,
11924 .set_rx_csum = bnx2x_set_rx_csum,
11925 .get_tx_csum = ethtool_op_get_tx_csum,
11926 .set_tx_csum = ethtool_op_set_tx_hw_csum,
11927 .set_flags = bnx2x_set_flags,
11928 .get_flags = ethtool_op_get_flags,
11929 .get_sg = ethtool_op_get_sg,
11930 .set_sg = ethtool_op_set_sg,
11931 .get_tso = ethtool_op_get_tso,
11932 .set_tso = bnx2x_set_tso,
11933 .self_test = bnx2x_self_test,
11934 .get_sset_count = bnx2x_get_sset_count,
11935 .get_strings = bnx2x_get_strings,
11936 .phys_id = bnx2x_phys_id,
11937 .get_ethtool_stats = bnx2x_get_ethtool_stats,
11938};
11939
11940/* end of ethtool_ops */
11941
11942/****************************************************************************
11943* General service functions
11944****************************************************************************/
11945
11946static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11947{
11948 u16 pmcsr;
11949
11950 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11951
11952 switch (state) {
11953 case PCI_D0:
11954 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11955 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11956 PCI_PM_CTRL_PME_STATUS));
11957
11958 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11959 /* delay required during transition out of D3hot */
11960 msleep(20);
11961 break;
11962
11963 case PCI_D3hot:
11964 		/* If there are other clients above, don't
11965 		   shut down the power */
11966 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11967 return 0;
11968 /* Don't shut down the power for emulation and FPGA */
11969 if (CHIP_REV_IS_SLOW(bp))
11970 return 0;
11971
11972 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11973 pmcsr |= 3;
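		/* 3 selects D3hot in the PM control/status state field */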
11974
11975 if (bp->wol)
11976 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11977
11978 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11979 pmcsr);
11980
11981 /* No more memory access after this point until
11982 * device is brought back to D0.
11983 */
11984 break;
11985
11986 default:
11987 return -EINVAL;
11988 }
11989 return 0;
11990}
11991
11992static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11993{
11994 u16 rx_cons_sb;
11995
11996 /* Tell compiler that status block fields can change */
11997 barrier();
11998 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
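	/* The last descriptor on each RCQ page links to the next page, so if
	 * the consumer index lands on it, step over that entry.
	 */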
11999 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
12000 rx_cons_sb++;
12001 return (fp->rx_comp_cons != rx_cons_sb);
12002}
12003
12004/*
12005 * net_device service functions
12006 */
12007
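/* NAPI poll: drain TX completions, then RX up to the budget; re-arm the
 * status-block interrupt only once both rings are verifiably empty.
 */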
12008static int bnx2x_poll(struct napi_struct *napi, int budget)
12009{
12010 int work_done = 0;
12011 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
12012 napi);
12013 struct bnx2x *bp = fp->bp;
12014
12015 while (1) {
12016#ifdef BNX2X_STOP_ON_ERROR
12017 if (unlikely(bp->panic)) {
12018 napi_complete(napi);
12019 return 0;
12020 }
12021#endif
12022
12023 if (bnx2x_has_tx_work(fp))
12024 bnx2x_tx_int(fp);
12025
12026 if (bnx2x_has_rx_work(fp)) {
12027 work_done += bnx2x_rx_int(fp, budget - work_done);
12028
12029 /* must not complete if we consumed full budget */
12030 if (work_done >= budget)
12031 break;
12032 }
12033
12034 /* Fall out from the NAPI loop if needed */
12035 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12036 bnx2x_update_fpsb_idx(fp);
12037 /* bnx2x_has_rx_work() reads the status block, thus we need
12038 * to ensure that status block indices have been actually read
12039 * (bnx2x_update_fpsb_idx) prior to this check
12040 * (bnx2x_has_rx_work) so that we won't write the "newer"
12041 * value of the status block to IGU (if there was a DMA right
12042 * after bnx2x_has_rx_work and if there is no rmb, the memory
12043 * reading (bnx2x_update_fpsb_idx) may be postponed to right
12044 * before bnx2x_ack_sb). In this case there will never be
12045 * another interrupt until there is another update of the
12046 * status block, while there is still unhandled work.
12047 */
12048 rmb();
12049
12050 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12051 napi_complete(napi);
12052 /* Re-enable interrupts */
12053 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12054 le16_to_cpu(fp->fp_c_idx),
12055 IGU_INT_NOP, 1);
12056 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12057 le16_to_cpu(fp->fp_u_idx),
12058 IGU_INT_ENABLE, 1);
12059 break;
12060 }
12061 }
12062 }
12063
12064 return work_done;
12065}
12066
12067
12068 /* We split the first BD into header and data BDs
12069  * to ease the pain of our fellow microcode engineers;
12070  * we use one mapping for both BDs.
12071  * So far this has only been observed to happen
12072  * in Other Operating Systems(TM).
12073  */
12074static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12075 struct bnx2x_fastpath *fp,
12076 struct sw_tx_bd *tx_buf,
12077 struct eth_tx_start_bd **tx_bd, u16 hlen,
12078 u16 bd_prod, int nbd)
12079{
12080 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12081 struct eth_tx_bd *d_tx_bd;
12082 dma_addr_t mapping;
12083 int old_len = le16_to_cpu(h_tx_bd->nbytes);
12084
12085 /* first fix first BD */
12086 h_tx_bd->nbd = cpu_to_le16(nbd);
12087 h_tx_bd->nbytes = cpu_to_le16(hlen);
12088
12089 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12090 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12091 h_tx_bd->addr_lo, h_tx_bd->nbd);
12092
12093 /* now get a new data BD
12094 * (after the pbd) and fill it */
12095 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12096 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12097
12098 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12099 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12100
12101 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12102 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12103 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12104
12105 /* this marks the BD as one that has no individual mapping */
12106 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12107
12108 DP(NETIF_MSG_TX_QUEUED,
12109 "TSO split data size is %d (%x:%x)\n",
12110 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12111
12112 /* update tx_bd */
12113 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12114
12115 return bd_prod;
12116}
12117
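/* The stack computes the partial checksum starting at csum_start, which may
 * not coincide with the transport header the HW expects; adjust the checksum
 * for the bytes in between and return it folded and byte-swapped for the
 * parsing BD.
 */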
12118static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12119{
12120 if (fix > 0)
12121 csum = (u16) ~csum_fold(csum_sub(csum,
12122 csum_partial(t_header - fix, fix, 0)));
12123
12124 else if (fix < 0)
12125 csum = (u16) ~csum_fold(csum_add(csum,
12126 csum_partial(t_header, -fix, 0)));
12127
12128 return swab16(csum);
12129}
12130
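/* Classify the skb for the TX path: which L3/L4 checksums are requested and
 * whether this is an IPv4 or IPv6 GSO packet.
 */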
12131static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12132{
12133 u32 rc;
12134
12135 if (skb->ip_summed != CHECKSUM_PARTIAL)
12136 rc = XMIT_PLAIN;
12137
12138 else {
12139 if (skb->protocol == htons(ETH_P_IPV6)) {
12140 rc = XMIT_CSUM_V6;
12141 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12142 rc |= XMIT_CSUM_TCP;
12143
12144 } else {
12145 rc = XMIT_CSUM_V4;
12146 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12147 rc |= XMIT_CSUM_TCP;
12148 }
12149 }
12150
12151 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12152 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12153
12154 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12155 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12156
12157 return rc;
12158}
12159
12160#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12161/* check if packet requires linearization (packet is too fragmented)
12162 no need to check fragmentation if page size > 8K (there will be no
12163 violation to FW restrictions) */
12164static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12165 u32 xmit_type)
12166{
12167 int to_copy = 0;
12168 int hlen = 0;
12169 int first_bd_sz = 0;
12170
12171 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12172 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12173
12174 if (xmit_type & XMIT_GSO) {
12175 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12176 /* Check if LSO packet needs to be copied:
12177 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12178 int wnd_size = MAX_FETCH_BD - 3;
12179 /* Number of windows to check */
12180 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12181 int wnd_idx = 0;
12182 int frag_idx = 0;
12183 u32 wnd_sum = 0;
12184
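			/* FW requires that any wnd_size consecutive BDs
			 * together carry at least one full MSS; slide the
			 * window across the fragments and request
			 * linearization if any window falls short.
			 */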
12185 /* Headers length */
12186 hlen = (int)(skb_transport_header(skb) - skb->data) +
12187 tcp_hdrlen(skb);
12188
12189 /* Amount of data (w/o headers) on linear part of SKB*/
12190 first_bd_sz = skb_headlen(skb) - hlen;
12191
12192 wnd_sum = first_bd_sz;
12193
12194 /* Calculate the first sum - it's special */
12195 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12196 wnd_sum +=
12197 skb_shinfo(skb)->frags[frag_idx].size;
12198
12199 /* If there was data on linear skb data - check it */
12200 if (first_bd_sz > 0) {
12201 if (unlikely(wnd_sum < lso_mss)) {
12202 to_copy = 1;
12203 goto exit_lbl;
12204 }
12205
12206 wnd_sum -= first_bd_sz;
12207 }
12208
12209 /* Others are easier: run through the frag list and
12210 check all windows */
12211 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12212 wnd_sum +=
12213 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12214
12215 if (unlikely(wnd_sum < lso_mss)) {
12216 to_copy = 1;
12217 break;
12218 }
12219 wnd_sum -=
12220 skb_shinfo(skb)->frags[wnd_idx].size;
12221 }
12222 } else {
12223 /* in non-LSO too fragmented packet should always
12224 be linearized */
12225 to_copy = 1;
12226 }
12227 }
12228
12229exit_lbl:
12230 if (unlikely(to_copy))
12231 DP(NETIF_MSG_TX_QUEUED,
12232 "Linearization IS REQUIRED for %s packet. "
12233 "num_frags %d hlen %d first_bd_sz %d\n",
12234 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12235 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12236
12237 return to_copy;
12238}
12239#endif
12240
12241/* called with netif_tx_lock
12242 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12243 * netif_wake_queue()
12244 */
12245static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12246{
12247 struct bnx2x *bp = netdev_priv(dev);
12248 struct bnx2x_fastpath *fp;
12249 struct netdev_queue *txq;
12250 struct sw_tx_bd *tx_buf;
12251 struct eth_tx_start_bd *tx_start_bd;
12252 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12253 struct eth_tx_parse_bd *pbd = NULL;
12254 u16 pkt_prod, bd_prod;
12255 int nbd, fp_index;
12256 dma_addr_t mapping;
12257 u32 xmit_type = bnx2x_xmit_type(bp, skb);
12258 int i;
12259 u8 hlen = 0;
12260 __le16 pkt_size = 0;
12261 struct ethhdr *eth;
12262 u8 mac_type = UNICAST_ADDRESS;
12263
12264#ifdef BNX2X_STOP_ON_ERROR
12265 if (unlikely(bp->panic))
12266 return NETDEV_TX_BUSY;
12267#endif
12268
12269 fp_index = skb_get_queue_mapping(skb);
12270 txq = netdev_get_tx_queue(dev, fp_index);
12271
12272 fp = &bp->fp[fp_index];
12273
12274 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12275 fp->eth_q_stats.driver_xoff++;
12276 netif_tx_stop_queue(txq);
12277 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12278 return NETDEV_TX_BUSY;
12279 }
12280
12281 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12282 " gso type %x xmit_type %x\n",
12283 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12284 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12285
12286 eth = (struct ethhdr *)skb->data;
12287
12288 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12289 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12290 if (is_broadcast_ether_addr(eth->h_dest))
12291 mac_type = BROADCAST_ADDRESS;
12292 else
12293 mac_type = MULTICAST_ADDRESS;
12294 }
12295
12296#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12297 /* First, check if we need to linearize the skb (due to FW
12298 restrictions). No need to check fragmentation if page size > 8K
12299 (there will be no violation to FW restrictions) */
12300 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12301 /* Statistics of linearization */
12302 bp->lin_cnt++;
12303 if (skb_linearize(skb) != 0) {
12304 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12305 "silently dropping this SKB\n");
12306 dev_kfree_skb_any(skb);
12307 return NETDEV_TX_OK;
12308 }
12309 }
12310#endif
12311
12312 /*
12313 Please read carefully. First we use one BD which we mark as start,
12314 then we have a parsing info BD (used for TSO or xsum),
12315 and only then we have the rest of the TSO BDs.
12316 (don't forget to mark the last one as last,
12317 and to unmap only AFTER you write to the BD ...)
12318 	And above all, all PBD sizes are in words - NOT DWORDS!
12319 */
12320
12321 pkt_prod = fp->tx_pkt_prod++;
12322 bd_prod = TX_BD(fp->tx_bd_prod);
12323
12324 /* get a tx_buf and first BD */
12325 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12326 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12327
12328 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12329 tx_start_bd->general_data = (mac_type <<
12330 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12331 /* header nbd */
12332 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12333
12334 /* remember the first BD of the packet */
12335 tx_buf->first_bd = fp->tx_bd_prod;
12336 tx_buf->skb = skb;
12337 tx_buf->flags = 0;
12338
12339 DP(NETIF_MSG_TX_QUEUED,
12340 "sending pkt %u @%p next_idx %u bd %u @%p\n",
12341 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12342
12343#ifdef BCM_VLAN
12344 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12345 (bp->flags & HW_VLAN_TX_FLAG)) {
12346 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12347 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12348 } else
12349#endif
12350 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12351
12352 /* turn on parsing and get a BD */
12353 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12354 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12355
12356 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12357
12358 if (xmit_type & XMIT_CSUM) {
12359 hlen = (skb_network_header(skb) - skb->data) / 2;
12360
12361 /* for now NS flag is not used in Linux */
12362 pbd->global_data =
12363 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12364 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12365
12366 pbd->ip_hlen = (skb_transport_header(skb) -
12367 skb_network_header(skb)) / 2;
12368
12369 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12370
12371 pbd->total_hlen = cpu_to_le16(hlen);
12372 hlen = hlen*2;
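		/* The PBD keeps header lengths in 16-bit words; hlen is
		 * converted back to bytes for the TSO header/data split below.
		 */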
12373
12374 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12375
12376 if (xmit_type & XMIT_CSUM_V4)
12377 tx_start_bd->bd_flags.as_bitfield |=
12378 ETH_TX_BD_FLAGS_IP_CSUM;
12379 else
12380 tx_start_bd->bd_flags.as_bitfield |=
12381 ETH_TX_BD_FLAGS_IPV6;
12382
12383 if (xmit_type & XMIT_CSUM_TCP) {
12384 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12385
12386 } else {
12387 s8 fix = SKB_CS_OFF(skb); /* signed! */
12388
12389 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12390
12391 DP(NETIF_MSG_TX_QUEUED,
12392 "hlen %d fix %d csum before fix %x\n",
12393 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12394
12395 /* HW bug: fixup the CSUM */
12396 pbd->tcp_pseudo_csum =
12397 bnx2x_csum_fix(skb_transport_header(skb),
12398 SKB_CS(skb), fix);
12399
12400 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12401 pbd->tcp_pseudo_csum);
12402 }
12403 }
12404
12405 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12406 skb_headlen(skb), DMA_TO_DEVICE);
12407
12408 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12409 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12410 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12411 tx_start_bd->nbd = cpu_to_le16(nbd);
12412 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12413 pkt_size = tx_start_bd->nbytes;
12414
12415 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
12416 " nbytes %d flags %x vlan %x\n",
12417 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12418 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12419 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12420
12421 if (xmit_type & XMIT_GSO) {
12422
12423 DP(NETIF_MSG_TX_QUEUED,
12424 "TSO packet len %d hlen %d total len %d tso size %d\n",
12425 skb->len, hlen, skb_headlen(skb),
12426 skb_shinfo(skb)->gso_size);
12427
12428 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12429
12430 if (unlikely(skb_headlen(skb) > hlen))
12431 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12432 hlen, bd_prod, ++nbd);
12433
12434 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12435 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12436 pbd->tcp_flags = pbd_tcp_flags(skb);
12437
12438 if (xmit_type & XMIT_GSO_V4) {
12439 pbd->ip_id = swab16(ip_hdr(skb)->id);
12440 pbd->tcp_pseudo_csum =
12441 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12442 ip_hdr(skb)->daddr,
12443 0, IPPROTO_TCP, 0));
12444
12445 } else
12446 pbd->tcp_pseudo_csum =
12447 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12448 &ipv6_hdr(skb)->daddr,
12449 0, IPPROTO_TCP, 0));
12450
12451 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12452 }
12453 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12454
12455 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12456 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12457
12458 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12459 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12460 if (total_pkt_bd == NULL)
12461 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12462
12463 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12464 frag->page_offset,
12465 frag->size, DMA_TO_DEVICE);
12466
12467 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12468 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12469 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12470 le16_add_cpu(&pkt_size, frag->size);
12471
12472 DP(NETIF_MSG_TX_QUEUED,
12473 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12474 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12475 le16_to_cpu(tx_data_bd->nbytes));
12476 }
12477
12478 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12479
12480 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12481
12482 /* now send a tx doorbell, counting the next BD
12483 * if the packet contains or ends with it
12484 */
12485 if (TX_BD_POFF(bd_prod) < nbd)
12486 nbd++;
12487
12488 if (total_pkt_bd != NULL)
12489 total_pkt_bd->total_pkt_bytes = pkt_size;
12490
12491 if (pbd)
12492 DP(NETIF_MSG_TX_QUEUED,
12493 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12494 " tcp_flags %x xsum %x seq %u hlen %u\n",
12495 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12496 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12497 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12498
12499 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
12500
12501 /*
12502 * Make sure that the BD data is updated before updating the producer
12503 * since FW might read the BD right after the producer is updated.
12504 * This is only applicable for weak-ordered memory model archs such
12505 	 * as IA-64. The following barrier is also mandatory since the FW
12506 	 * assumes packets always have BDs.
12507 */
12508 wmb();
12509
12510 fp->tx_db.data.prod += nbd;
12511 barrier();
12512 DOORBELL(bp, fp->index, fp->tx_db.raw);
12513
12514 mmiowb();
12515
12516 fp->tx_bd_prod += nbd;
12517
12518 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12519 netif_tx_stop_queue(txq);
12520
12521 		/* paired memory barrier is in bnx2x_tx_int(); we have to keep
12522 		 * ordering of set_bit() in netif_tx_stop_queue() and the read
12523 		 * of fp->tx_bd_cons */
12524 smp_mb();
12525
12526 fp->eth_q_stats.driver_xoff++;
12527 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12528 netif_tx_wake_queue(txq);
12529 }
12530 fp->tx_pkt++;
12531
12532 return NETDEV_TX_OK;
12533}
12534
12535/* called with rtnl_lock */
12536static int bnx2x_open(struct net_device *dev)
12537{
12538 struct bnx2x *bp = netdev_priv(dev);
12539
12540 netif_carrier_off(dev);
12541
12542 bnx2x_set_power_state(bp, PCI_D0);
12543
12544 if (!bnx2x_reset_is_done(bp)) {
12545 do {
12546 			/* Reset the MCP mailbox sequence if there is an
12547 			 * ongoing recovery
12548 			 */
12549 bp->fw_seq = 0;
12550
12551 			/* If it's the first function to load and "reset done"
12552 			 * is still not cleared, a recovery may still be in
12553 			 * progress. We don't check the attention state here:
12554 			 * it may already have been cleared by a "common" reset,
12555 			 * but we shall proceed with "process kill" anyway.
12556 			 */
12557 if ((bnx2x_get_load_cnt(bp) == 0) &&
12558 bnx2x_trylock_hw_lock(bp,
12559 HW_LOCK_RESOURCE_RESERVED_08) &&
12560 (!bnx2x_leader_reset(bp))) {
12561 DP(NETIF_MSG_HW, "Recovered in open\n");
12562 break;
12563 }
12564
12565 bnx2x_set_power_state(bp, PCI_D3hot);
12566
12567 			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
12568 			       " completed yet. Try again later. If you still see"
12569 			       " this message after a few retries then a power"
12570 			       " cycle is required.\n", bp->dev->name);
12571
12572 return -EAGAIN;
12573 } while (0);
12574 }
12575
12576 bp->recovery_state = BNX2X_RECOVERY_DONE;
12577
12578 return bnx2x_nic_load(bp, LOAD_OPEN);
12579}
12580
12581/* called with rtnl_lock */
12582static int bnx2x_close(struct net_device *dev)
12583{
12584 struct bnx2x *bp = netdev_priv(dev);
12585
12586 /* Unload the driver, release IRQs */
12587 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12588 bnx2x_set_power_state(bp, PCI_D3hot);
12589
12590 return 0;
12591}
12592
12593/* called with netif_tx_lock from dev_mcast.c */
12594static void bnx2x_set_rx_mode(struct net_device *dev)
12595{
12596 struct bnx2x *bp = netdev_priv(dev);
12597 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12598 int port = BP_PORT(bp);
12599
12600 if (bp->state != BNX2X_STATE_OPEN) {
12601 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12602 return;
12603 }
12604
12605 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12606
12607 if (dev->flags & IFF_PROMISC)
12608 rx_mode = BNX2X_RX_MODE_PROMISC;
12609
12610 else if ((dev->flags & IFF_ALLMULTI) ||
12611 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12612 CHIP_IS_E1(bp)))
12613 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12614
12615 else { /* some multicasts */
12616 if (CHIP_IS_E1(bp)) {
12617 int i, old, offset;
12618 struct netdev_hw_addr *ha;
12619 struct mac_configuration_cmd *config =
12620 bnx2x_sp(bp, mcast_config);
12621
12622 i = 0;
12623 netdev_for_each_mc_addr(ha, dev) {
12624 config->config_table[i].
12625 cam_entry.msb_mac_addr =
12626 swab16(*(u16 *)&ha->addr[0]);
12627 config->config_table[i].
12628 cam_entry.middle_mac_addr =
12629 swab16(*(u16 *)&ha->addr[2]);
12630 config->config_table[i].
12631 cam_entry.lsb_mac_addr =
12632 swab16(*(u16 *)&ha->addr[4]);
12633 config->config_table[i].cam_entry.flags =
12634 cpu_to_le16(port);
12635 config->config_table[i].
12636 target_table_entry.flags = 0;
12637 config->config_table[i].target_table_entry.
12638 clients_bit_vector =
12639 cpu_to_le32(1 << BP_L_ID(bp));
12640 config->config_table[i].
12641 target_table_entry.vlan_id = 0;
12642
12643 DP(NETIF_MSG_IFUP,
12644 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12645 config->config_table[i].
12646 cam_entry.msb_mac_addr,
12647 config->config_table[i].
12648 cam_entry.middle_mac_addr,
12649 config->config_table[i].
12650 cam_entry.lsb_mac_addr);
12651 i++;
12652 }
12653 old = config->hdr.length;
12654 if (old > i) {
12655 for (; i < old; i++) {
12656 if (CAM_IS_INVALID(config->
12657 config_table[i])) {
12658 /* already invalidated */
12659 break;
12660 }
12661 /* invalidate */
12662 CAM_INVALIDATE(config->
12663 config_table[i]);
12664 }
12665 }
12666
12667 if (CHIP_REV_IS_SLOW(bp))
12668 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12669 else
12670 offset = BNX2X_MAX_MULTICAST*(1 + port);
12671
12672 config->hdr.length = i;
12673 config->hdr.offset = offset;
12674 config->hdr.client_id = bp->fp->cl_id;
12675 config->hdr.reserved1 = 0;
12676
12677 bp->set_mac_pending++;
12678 smp_wmb();
12679
12680 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12681 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12682 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12683 0);
12684 } else { /* E1H */
12685 /* Accept one or more multicasts */
12686 struct netdev_hw_addr *ha;
12687 u32 mc_filter[MC_HASH_SIZE];
12688 u32 crc, bit, regidx;
12689 int i;
12690
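			/* E1H approximates the multicast filter with a hash:
			 * bits 31:24 of the CRC32c of each MAC select one bit
			 * in the MC_HASH_SIZE * 32 bit table written below.
			 */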
12691 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12692
12693 netdev_for_each_mc_addr(ha, dev) {
12694 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12695 ha->addr);
12696
12697 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12698 bit = (crc >> 24) & 0xff;
12699 regidx = bit >> 5;
12700 bit &= 0x1f;
12701 mc_filter[regidx] |= (1 << bit);
12702 }
12703
12704 for (i = 0; i < MC_HASH_SIZE; i++)
12705 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12706 mc_filter[i]);
12707 }
12708 }
12709
12710 bp->rx_mode = rx_mode;
12711 bnx2x_set_storm_rx_mode(bp);
12712}
12713
12714/* called with rtnl_lock */
12715static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12716{
12717 struct sockaddr *addr = p;
12718 struct bnx2x *bp = netdev_priv(dev);
12719
12720 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12721 return -EINVAL;
12722
12723 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12724 if (netif_running(dev)) {
12725 if (CHIP_IS_E1(bp))
12726 bnx2x_set_eth_mac_addr_e1(bp, 1);
12727 else
12728 bnx2x_set_eth_mac_addr_e1h(bp, 1);
12729 }
12730
12731 return 0;
12732}
12733
12734/* called with rtnl_lock */
12735static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12736 int devad, u16 addr)
12737{
12738 struct bnx2x *bp = netdev_priv(netdev);
12739 u16 value;
12740 int rc;
12741 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12742
12743 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12744 prtad, devad, addr);
12745
12746 if (prtad != bp->mdio.prtad) {
12747 		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12748 prtad, bp->mdio.prtad);
12749 return -EINVAL;
12750 }
12751
12752 /* The HW expects different devad if CL22 is used */
12753 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12754
12755 bnx2x_acquire_phy_lock(bp);
12756 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12757 devad, addr, &value);
12758 bnx2x_release_phy_lock(bp);
12759 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12760
12761 if (!rc)
12762 rc = value;
12763 return rc;
12764}
12765
12766/* called with rtnl_lock */
12767static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12768 u16 addr, u16 value)
12769{
12770 struct bnx2x *bp = netdev_priv(netdev);
12771 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12772 int rc;
12773
12774 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12775 " value 0x%x\n", prtad, devad, addr, value);
12776
12777 if (prtad != bp->mdio.prtad) {
12778 		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12779 prtad, bp->mdio.prtad);
12780 return -EINVAL;
12781 }
12782
12783 /* The HW expects different devad if CL22 is used */
12784 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12785
12786 bnx2x_acquire_phy_lock(bp);
12787 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12788 devad, addr, value);
12789 bnx2x_release_phy_lock(bp);
12790 return rc;
12791}
12792
12793/* called with rtnl_lock */
12794static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12795{
12796 struct bnx2x *bp = netdev_priv(dev);
12797 struct mii_ioctl_data *mdio = if_mii(ifr);
12798
12799 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12800 mdio->phy_id, mdio->reg_num, mdio->val_in);
12801
12802 if (!netif_running(dev))
12803 return -EAGAIN;
12804
12805 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12806}
12807
12808/* called with rtnl_lock */
12809static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12810{
12811 struct bnx2x *bp = netdev_priv(dev);
12812 int rc = 0;
12813
12814 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12815 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12816 return -EAGAIN;
12817 }
12818
12819 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12820 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12821 return -EINVAL;
12822
12823 /* This does not race with packet allocation
12824 * because the actual alloc size is
12825 * only updated as part of load
12826 */
12827 dev->mtu = new_mtu;
12828
12829 if (netif_running(dev)) {
12830 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12831 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12832 }
12833
12834 return rc;
12835}
12836
12837static void bnx2x_tx_timeout(struct net_device *dev)
12838{
12839 struct bnx2x *bp = netdev_priv(dev);
12840
12841#ifdef BNX2X_STOP_ON_ERROR
12842 if (!bp->panic)
12843 bnx2x_panic();
12844#endif
12845 	/* This allows the netif to be shut down gracefully before resetting */
12846 schedule_delayed_work(&bp->reset_task, 0);
12847}
12848
12849#ifdef BCM_VLAN
12850/* called with rtnl_lock */
12851static void bnx2x_vlan_rx_register(struct net_device *dev,
12852 struct vlan_group *vlgrp)
12853{
12854 struct bnx2x *bp = netdev_priv(dev);
12855
12856 bp->vlgrp = vlgrp;
12857
12858 /* Set flags according to the required capabilities */
12859 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12860
12861 if (dev->features & NETIF_F_HW_VLAN_TX)
12862 bp->flags |= HW_VLAN_TX_FLAG;
12863
12864 if (dev->features & NETIF_F_HW_VLAN_RX)
12865 bp->flags |= HW_VLAN_RX_FLAG;
12866
12867 if (netif_running(dev))
12868 bnx2x_set_client_config(bp);
12869}
12870
12871#endif
12872
12873#ifdef CONFIG_NET_POLL_CONTROLLER
12874static void poll_bnx2x(struct net_device *dev)
12875{
12876 struct bnx2x *bp = netdev_priv(dev);
12877
12878 disable_irq(bp->pdev->irq);
12879 bnx2x_interrupt(bp->pdev->irq, dev);
12880 enable_irq(bp->pdev->irq);
12881}
12882#endif
12883
12884static const struct net_device_ops bnx2x_netdev_ops = {
12885 .ndo_open = bnx2x_open,
12886 .ndo_stop = bnx2x_close,
12887 .ndo_start_xmit = bnx2x_start_xmit,
12888 .ndo_set_multicast_list = bnx2x_set_rx_mode,
12889 .ndo_set_mac_address = bnx2x_change_mac_addr,
12890 .ndo_validate_addr = eth_validate_addr,
12891 .ndo_do_ioctl = bnx2x_ioctl,
12892 .ndo_change_mtu = bnx2x_change_mtu,
12893 .ndo_tx_timeout = bnx2x_tx_timeout,
12894#ifdef BCM_VLAN
12895 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
12896#endif
12897#ifdef CONFIG_NET_POLL_CONTROLLER
12898 .ndo_poll_controller = poll_bnx2x,
12899#endif
12900};
12901
12902static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12903 struct net_device *dev)
12904{
12905 struct bnx2x *bp;
12906 int rc;
12907
12908 SET_NETDEV_DEV(dev, &pdev->dev);
12909 bp = netdev_priv(dev);
12910
12911 bp->dev = dev;
12912 bp->pdev = pdev;
12913 bp->flags = 0;
12914 bp->func = PCI_FUNC(pdev->devfn);
12915
12916 rc = pci_enable_device(pdev);
12917 if (rc) {
12918 dev_err(&bp->pdev->dev,
12919 "Cannot enable PCI device, aborting\n");
12920 goto err_out;
12921 }
12922
12923 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12924 dev_err(&bp->pdev->dev,
12925 "Cannot find PCI device base address, aborting\n");
12926 rc = -ENODEV;
12927 goto err_out_disable;
12928 }
12929
12930 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12931 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12932 " base address, aborting\n");
12933 rc = -ENODEV;
12934 goto err_out_disable;
12935 }
12936
12937 if (atomic_read(&pdev->enable_cnt) == 1) {
12938 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12939 if (rc) {
12940 dev_err(&bp->pdev->dev,
12941 "Cannot obtain PCI resources, aborting\n");
12942 goto err_out_disable;
12943 }
12944
12945 pci_set_master(pdev);
12946 pci_save_state(pdev);
12947 }
12948
12949 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12950 if (bp->pm_cap == 0) {
12951 dev_err(&bp->pdev->dev,
12952 "Cannot find power management capability, aborting\n");
12953 rc = -EIO;
12954 goto err_out_release;
12955 }
12956
12957 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12958 if (bp->pcie_cap == 0) {
12959 dev_err(&bp->pdev->dev,
12960 "Cannot find PCI Express capability, aborting\n");
12961 rc = -EIO;
12962 goto err_out_release;
12963 }
12964
12965 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12966 bp->flags |= USING_DAC_FLAG;
12967 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12968 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12969 " failed, aborting\n");
12970 rc = -EIO;
12971 goto err_out_release;
12972 }
12973
12974 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12975 dev_err(&bp->pdev->dev,
12976 "System does not support DMA, aborting\n");
12977 rc = -EIO;
12978 goto err_out_release;
12979 }
12980
12981 dev->mem_start = pci_resource_start(pdev, 0);
12982 dev->base_addr = dev->mem_start;
12983 dev->mem_end = pci_resource_end(pdev, 0);
12984
12985 dev->irq = pdev->irq;
12986
12987 bp->regview = pci_ioremap_bar(pdev, 0);
12988 if (!bp->regview) {
12989 dev_err(&bp->pdev->dev,
12990 "Cannot map register space, aborting\n");
12991 rc = -ENOMEM;
12992 goto err_out_release;
12993 }
12994
12995 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12996 min_t(u64, BNX2X_DB_SIZE,
12997 pci_resource_len(pdev, 2)));
12998 if (!bp->doorbells) {
12999 dev_err(&bp->pdev->dev,
13000 "Cannot map doorbell space, aborting\n");
13001 rc = -ENOMEM;
13002 goto err_out_unmap;
13003 }
13004
13005 bnx2x_set_power_state(bp, PCI_D0);
13006
13007 /* clean indirect addresses */
13008 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13009 PCICFG_VENDOR_ID_OFFSET);
13010 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
13011 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
13012 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
13013 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
13014
13015 /* Reset the load counter */
13016 bnx2x_clear_load_cnt(bp);
13017
13018 dev->watchdog_timeo = TX_TIMEOUT;
13019
13020 dev->netdev_ops = &bnx2x_netdev_ops;
13021 dev->ethtool_ops = &bnx2x_ethtool_ops;
13022 dev->features |= NETIF_F_SG;
13023 dev->features |= NETIF_F_HW_CSUM;
13024 if (bp->flags & USING_DAC_FLAG)
13025 dev->features |= NETIF_F_HIGHDMA;
13026 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13027 dev->features |= NETIF_F_TSO6;
13028#ifdef BCM_VLAN
13029 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
13030 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
13031
13032 dev->vlan_features |= NETIF_F_SG;
13033 dev->vlan_features |= NETIF_F_HW_CSUM;
13034 if (bp->flags & USING_DAC_FLAG)
13035 dev->vlan_features |= NETIF_F_HIGHDMA;
13036 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13037 dev->vlan_features |= NETIF_F_TSO6;
13038#endif
13039
13040 /* get_port_hwinfo() will set prtad and mmds properly */
13041 bp->mdio.prtad = MDIO_PRTAD_NONE;
13042 bp->mdio.mmds = 0;
13043 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13044 bp->mdio.dev = dev;
13045 bp->mdio.mdio_read = bnx2x_mdio_read;
13046 bp->mdio.mdio_write = bnx2x_mdio_write;
13047
13048 return 0;
13049
13050err_out_unmap:
13051 if (bp->regview) {
13052 iounmap(bp->regview);
13053 bp->regview = NULL;
13054 }
13055 if (bp->doorbells) {
13056 iounmap(bp->doorbells);
13057 bp->doorbells = NULL;
13058 }
13059
13060err_out_release:
13061 if (atomic_read(&pdev->enable_cnt) == 1)
13062 pci_release_regions(pdev);
13063
13064err_out_disable:
13065 pci_disable_device(pdev);
13066 pci_set_drvdata(pdev, NULL);
13067
13068err_out:
13069 return rc;
13070}
13071
13072static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13073 int *width, int *speed)
13074{
13075 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13076
13077 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13078
13079 /* return value of 1=2.5GHz 2=5GHz */
13080 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
13081}
13082
13083static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13084{
13085 const struct firmware *firmware = bp->firmware;
13086 struct bnx2x_fw_file_hdr *fw_hdr;
13087 struct bnx2x_fw_file_section *sections;
13088 u32 offset, len, num_ops;
13089 u16 *ops_offsets;
13090 int i;
13091 const u8 *fw_ver;
13092
13093 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13094 return -EINVAL;
13095
13096 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13097 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13098
13099 /* Make sure none of the offsets and sizes make us read beyond
13100 * the end of the firmware data */
13101 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13102 offset = be32_to_cpu(sections[i].offset);
13103 len = be32_to_cpu(sections[i].len);
13104 if (offset + len > firmware->size) {
13105 dev_err(&bp->pdev->dev,
13106 "Section %d length is out of bounds\n", i);
13107 return -EINVAL;
13108 }
13109 }
13110
13111 /* Likewise for the init_ops offsets */
13112 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13113 ops_offsets = (u16 *)(firmware->data + offset);
13114 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13115
13116 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13117 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13118 dev_err(&bp->pdev->dev,
13119 "Section offset %d is out of bounds\n", i);
13120 return -EINVAL;
13121 }
13122 }
13123
13124 /* Check FW version */
13125 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13126 fw_ver = firmware->data + offset;
13127 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13128 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13129 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13130 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13131 dev_err(&bp->pdev->dev,
13132 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13133 fw_ver[0], fw_ver[1], fw_ver[2],
13134 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13135 BCM_5710_FW_MINOR_VERSION,
13136 BCM_5710_FW_REVISION_VERSION,
13137 BCM_5710_FW_ENGINEERING_VERSION);
13138 return -EINVAL;
13139 }
13140
13141 return 0;
13142}
13143
13144static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13145{
13146 const __be32 *source = (const __be32 *)_source;
13147 u32 *target = (u32 *)_target;
13148 u32 i;
13149
13150 for (i = 0; i < n/4; i++)
13151 target[i] = be32_to_cpu(source[i]);
13152}
13153
13154/*
13155 Ops array is stored in the following format:
13156 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13157 */
13158static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13159{
13160 const __be32 *source = (const __be32 *)_source;
13161 struct raw_op *target = (struct raw_op *)_target;
13162 u32 i, j, tmp;
13163
13164 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13165 tmp = be32_to_cpu(source[j]);
13166 target[i].op = (tmp >> 24) & 0xff;
13167 target[i].offset = tmp & 0xffffff;
13168 target[i].raw_data = be32_to_cpu(source[j + 1]);
13169 }
13170}
13171
13172static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13173{
13174 const __be16 *source = (const __be16 *)_source;
13175 u16 *target = (u16 *)_target;
13176 u32 i;
13177
13178 for (i = 0; i < n/2; i++)
13179 target[i] = be16_to_cpu(source[i]);
13180}
13181
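/* Allocate bp-><arr>, then copy the matching section of the firmware file
 * into it, converting from big endian with the supplied helper.
 */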
13182#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13183do { \
13184 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13185 bp->arr = kmalloc(len, GFP_KERNEL); \
13186 if (!bp->arr) { \
13187 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13188 goto lbl; \
13189 } \
13190 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13191 (u8 *)bp->arr, len); \
13192} while (0)
13193
13194static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13195{
13196 const char *fw_file_name;
13197 struct bnx2x_fw_file_hdr *fw_hdr;
13198 int rc;
13199
13200 if (CHIP_IS_E1(bp))
13201 fw_file_name = FW_FILE_NAME_E1;
13202 else if (CHIP_IS_E1H(bp))
13203 fw_file_name = FW_FILE_NAME_E1H;
13204 else {
13205 dev_err(dev, "Unsupported chip revision\n");
13206 return -EINVAL;
13207 }
13208
13209 dev_info(dev, "Loading %s\n", fw_file_name);
13210
13211 rc = request_firmware(&bp->firmware, fw_file_name, dev);
13212 if (rc) {
13213 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
13214 goto request_firmware_exit;
13215 }
13216
13217 rc = bnx2x_check_firmware(bp);
13218 if (rc) {
13219 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
13220 goto request_firmware_exit;
13221 }
13222
13223 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13224
13225 /* Initialize the pointers to the init arrays */
13226 /* Blob */
13227 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13228
13229 /* Opcodes */
13230 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13231
13232 /* Offsets */
13233 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13234 be16_to_cpu_n);
13235
13236 /* STORMs firmware */
13237 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13238 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13239 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13240 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13241 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13242 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13243 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13244 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13245 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13246 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13247 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13248 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13249 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13250 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13251 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13252 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13253
13254 return 0;
13255
13256init_offsets_alloc_err:
13257 kfree(bp->init_ops);
13258init_ops_alloc_err:
13259 kfree(bp->init_data);
13260request_firmware_exit:
13261 release_firmware(bp->firmware);
13262
13263 return rc;
13264}
13265
13266
13267static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13268 const struct pci_device_id *ent)
13269{
13270 struct net_device *dev = NULL;
13271 struct bnx2x *bp;
13272 int pcie_width, pcie_speed;
13273 int rc;
13274
13275 /* dev zeroed in init_etherdev */
13276 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
13277 if (!dev) {
13278 dev_err(&pdev->dev, "Cannot allocate net device\n");
13279 return -ENOMEM;
13280 }
13281
13282 bp = netdev_priv(dev);
13283 bp->msg_enable = debug;
13284
13285 pci_set_drvdata(pdev, dev);
13286
13287 rc = bnx2x_init_dev(pdev, dev);
13288 if (rc < 0) {
13289 free_netdev(dev);
13290 return rc;
13291 }
13292
13293 rc = bnx2x_init_bp(bp);
13294 if (rc)
13295 goto init_one_exit;
13296
13297 /* Set init arrays */
13298 rc = bnx2x_init_firmware(bp, &pdev->dev);
13299 if (rc) {
13300 dev_err(&pdev->dev, "Error loading firmware\n");
13301 goto init_one_exit;
13302 }
13303
13304 rc = register_netdev(dev);
13305 if (rc) {
13306 dev_err(&pdev->dev, "Cannot register net device\n");
13307 goto init_one_exit;
13308 }
13309
13310 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
13311 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13312 " IRQ %d, ", board_info[ent->driver_data].name,
13313 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13314 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13315 dev->base_addr, bp->pdev->irq);
13316 pr_cont("node addr %pM\n", dev->dev_addr);
13317
13318 return 0;
13319
13320init_one_exit:
13321 if (bp->regview)
13322 iounmap(bp->regview);
13323
13324 if (bp->doorbells)
13325 iounmap(bp->doorbells);
13326
13327 free_netdev(dev);
13328
13329 if (atomic_read(&pdev->enable_cnt) == 1)
13330 pci_release_regions(pdev);
13331
13332 pci_disable_device(pdev);
13333 pci_set_drvdata(pdev, NULL);
13334
13335 return rc;
13336}
13337
13338static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13339{
13340 struct net_device *dev = pci_get_drvdata(pdev);
13341 struct bnx2x *bp;
13342
13343 if (!dev) {
13344 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13345 return;
13346 }
13347 bp = netdev_priv(dev);
13348
13349 unregister_netdev(dev);
13350
13351 /* Make sure RESET task is not scheduled before continuing */
13352 cancel_delayed_work_sync(&bp->reset_task);
13353
13354 kfree(bp->init_ops_offsets);
13355 kfree(bp->init_ops);
13356 kfree(bp->init_data);
13357 release_firmware(bp->firmware);
13358
13359 if (bp->regview)
13360 iounmap(bp->regview);
13361
13362 if (bp->doorbells)
13363 iounmap(bp->doorbells);
13364
13365 free_netdev(dev);
13366
13367 if (atomic_read(&pdev->enable_cnt) == 1)
13368 pci_release_regions(pdev);
13369
13370 pci_disable_device(pdev);
13371 pci_set_drvdata(pdev, NULL);
13372}
13373
13374static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13375{
13376 struct net_device *dev = pci_get_drvdata(pdev);
13377 struct bnx2x *bp;
13378
13379 if (!dev) {
13380 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13381 return -ENODEV;
13382 }
13383 bp = netdev_priv(dev);
13384
13385 rtnl_lock();
13386
13387 pci_save_state(pdev);
13388
13389 if (!netif_running(dev)) {
13390 rtnl_unlock();
13391 return 0;
13392 }
13393
13394 netif_device_detach(dev);
13395
13396 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13397
13398 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13399
13400 rtnl_unlock();
13401
13402 return 0;
13403}
13404
13405static int bnx2x_resume(struct pci_dev *pdev)
13406{
13407 struct net_device *dev = pci_get_drvdata(pdev);
13408 struct bnx2x *bp;
13409 int rc;
13410
13411 if (!dev) {
13412 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13413 return -ENODEV;
13414 }
13415 bp = netdev_priv(dev);
13416
13417 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13418 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13419 return -EAGAIN;
13420 }
13421
13422 rtnl_lock();
13423
13424 pci_restore_state(pdev);
13425
13426 if (!netif_running(dev)) {
13427 rtnl_unlock();
13428 return 0;
13429 }
13430
13431 bnx2x_set_power_state(bp, PCI_D0);
13432 netif_device_attach(dev);
13433
13434 rc = bnx2x_nic_load(bp, LOAD_OPEN);
13435
13436 rtnl_unlock();
13437
13438 return rc;
13439}
13440
13441static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13442{
13443 int i;
13444
13445 bp->state = BNX2X_STATE_ERROR;
13446
13447 bp->rx_mode = BNX2X_RX_MODE_NONE;
13448
13449 bnx2x_netif_stop(bp, 0);
13450 netif_carrier_off(bp->dev);
13451
13452 del_timer_sync(&bp->timer);
13453 bp->stats_state = STATS_STATE_DISABLED;
13454 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13455
13456 /* Release IRQs */
13457 bnx2x_free_irq(bp, false);
13458
13459 if (CHIP_IS_E1(bp)) {
13460 struct mac_configuration_cmd *config =
13461 bnx2x_sp(bp, mcast_config);
13462
13463 for (i = 0; i < config->hdr.length; i++)
13464 CAM_INVALIDATE(config->config_table[i]);
13465 }
13466
13467 /* Free SKBs, SGEs, TPA pool and driver internals */
13468 bnx2x_free_skbs(bp);
13469 for_each_queue(bp, i)
13470 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13471 for_each_queue(bp, i)
13472 netif_napi_del(&bnx2x_fp(bp, i, napi));
13473 bnx2x_free_mem(bp);
13474
13475 bp->state = BNX2X_STATE_CLOSED;
13476
13477 return 0;
13478}
13479
13480static void bnx2x_eeh_recover(struct bnx2x *bp)
13481{
13482 u32 val;
13483
13484 mutex_init(&bp->port.phy_mutex);
13485
13486 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13487 bp->link_params.shmem_base = bp->common.shmem_base;
13488 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13489
13490 if (!bp->common.shmem_base ||
13491 (bp->common.shmem_base < 0xA0000) ||
13492 (bp->common.shmem_base >= 0xC0000)) {
13493 BNX2X_DEV_INFO("MCP not active\n");
13494 bp->flags |= NO_MCP_FLAG;
13495 return;
13496 }
13497
13498 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13499 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13500 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13501 BNX2X_ERR("BAD MCP validity signature\n");
13502
13503 if (!BP_NOMCP(bp)) {
13504 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13505 & DRV_MSG_SEQ_NUMBER_MASK);
13506 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13507 }
13508}
13509
13510/**
13511 * bnx2x_io_error_detected - called when PCI error is detected
13512 * @pdev: Pointer to PCI device
13513 * @state: The current pci connection state
13514 *
13515 * This function is called after a PCI bus error affecting
13516 * this device has been detected.
13517 */
13518static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13519 pci_channel_state_t state)
13520{
13521 struct net_device *dev = pci_get_drvdata(pdev);
13522 struct bnx2x *bp = netdev_priv(dev);
13523
13524 rtnl_lock();
13525
13526 netif_device_detach(dev);
13527
13528 if (state == pci_channel_io_perm_failure) {
13529 rtnl_unlock();
13530 return PCI_ERS_RESULT_DISCONNECT;
13531 }
13532
13533 if (netif_running(dev))
13534 bnx2x_eeh_nic_unload(bp);
13535
13536 pci_disable_device(pdev);
13537
13538 rtnl_unlock();
13539
13540 /* Request a slot reset */
13541 return PCI_ERS_RESULT_NEED_RESET;
13542}
13543
13544/**
13545 * bnx2x_io_slot_reset - called after the PCI bus has been reset
13546 * @pdev: Pointer to PCI device
13547 *
13548 * Restart the card from scratch, as if from a cold-boot.
13549 */
13550static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13551{
13552 struct net_device *dev = pci_get_drvdata(pdev);
13553 struct bnx2x *bp = netdev_priv(dev);
13554
13555 rtnl_lock();
13556
13557 if (pci_enable_device(pdev)) {
13558 dev_err(&pdev->dev,
13559 "Cannot re-enable PCI device after reset\n");
13560 rtnl_unlock();
13561 return PCI_ERS_RESULT_DISCONNECT;
13562 }
13563
13564 pci_set_master(pdev);
13565 pci_restore_state(pdev);
13566
13567 if (netif_running(dev))
13568 bnx2x_set_power_state(bp, PCI_D0);
13569
13570 rtnl_unlock();
13571
13572 return PCI_ERS_RESULT_RECOVERED;
13573}
13574
13575/**
13576 * bnx2x_io_resume - called when traffic can start flowing again
13577 * @pdev: Pointer to PCI device
13578 *
13579 * This callback is called when the error recovery driver tells us that
13580  * it's OK to resume normal operation.
13581 */
13582static void bnx2x_io_resume(struct pci_dev *pdev)
13583{
13584 struct net_device *dev = pci_get_drvdata(pdev);
13585 struct bnx2x *bp = netdev_priv(dev);
13586
13587 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13588 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13589 return;
13590 }
13591
13592 rtnl_lock();
13593
13594 bnx2x_eeh_recover(bp);
13595
13596 if (netif_running(dev))
13597 bnx2x_nic_load(bp, LOAD_NORMAL);
13598
13599 netif_device_attach(dev);
13600
13601 rtnl_unlock();
13602}
13603
13604static struct pci_error_handlers bnx2x_err_handler = {
13605 .error_detected = bnx2x_io_error_detected,
13606 .slot_reset = bnx2x_io_slot_reset,
13607 .resume = bnx2x_io_resume,
13608};
13609
13610static struct pci_driver bnx2x_pci_driver = {
13611 .name = DRV_MODULE_NAME,
13612 .id_table = bnx2x_pci_tbl,
13613 .probe = bnx2x_init_one,
13614 .remove = __devexit_p(bnx2x_remove_one),
13615 .suspend = bnx2x_suspend,
13616 .resume = bnx2x_resume,
13617 .err_handler = &bnx2x_err_handler,
13618};
13619
13620static int __init bnx2x_init(void)
13621{
13622 int ret;
13623
13624 pr_info("%s", version);
13625
13626 bnx2x_wq = create_singlethread_workqueue("bnx2x");
13627 if (bnx2x_wq == NULL) {
13628 pr_err("Cannot create workqueue\n");
13629 return -ENOMEM;
13630 }
13631
13632 ret = pci_register_driver(&bnx2x_pci_driver);
13633 if (ret) {
13634 pr_err("Cannot register driver\n");
13635 destroy_workqueue(bnx2x_wq);
13636 }
13637 return ret;
13638}
13639
13640static void __exit bnx2x_cleanup(void)
13641{
13642 pci_unregister_driver(&bnx2x_pci_driver);
13643
13644 destroy_workqueue(bnx2x_wq);
13645}
13646
13647module_init(bnx2x_init);
13648module_exit(bnx2x_cleanup);
13649
13650#ifdef BCM_CNIC
13651
13652/* count denotes the number of new completions we have seen */
13653static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13654{
13655 struct eth_spe *spe;
13656
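	/* Retire 'count' completed slow path entries and refill the SPQ from
	 * the CNIC kernel work queue, keeping at most max_kwqe_pending
	 * entries outstanding.
	 */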
13657#ifdef BNX2X_STOP_ON_ERROR
13658 if (unlikely(bp->panic))
13659 return;
13660#endif
13661
13662 spin_lock_bh(&bp->spq_lock);
13663 bp->cnic_spq_pending -= count;
13664
13665 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
13666 bp->cnic_spq_pending++) {
13667
13668 if (!bp->cnic_kwq_pending)
13669 break;
13670
13671 spe = bnx2x_sp_get_next(bp);
13672 *spe = *bp->cnic_kwq_cons;
13673
13674 bp->cnic_kwq_pending--;
13675
13676 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
13677 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13678
13679 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13680 bp->cnic_kwq_cons = bp->cnic_kwq;
13681 else
13682 bp->cnic_kwq_cons++;
13683 }
13684 bnx2x_sp_prod_update(bp);
13685 spin_unlock_bh(&bp->spq_lock);
13686}
13687
13688static int bnx2x_cnic_sp_queue(struct net_device *dev,
13689 struct kwqe_16 *kwqes[], u32 count)
13690{
13691 struct bnx2x *bp = netdev_priv(dev);
13692 int i;
13693
13694#ifdef BNX2X_STOP_ON_ERROR
13695 if (unlikely(bp->panic))
13696 return -EIO;
13697#endif
13698
13699 spin_lock_bh(&bp->spq_lock);
13700
13701 for (i = 0; i < count; i++) {
13702 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13703
13704 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13705 break;
13706
13707 *bp->cnic_kwq_prod = *spe;
13708
13709 bp->cnic_kwq_pending++;
13710
13711 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
13712 spe->hdr.conn_and_cmd_data, spe->hdr.type,
13713 spe->data.mac_config_addr.hi,
13714 spe->data.mac_config_addr.lo,
13715 bp->cnic_kwq_pending);
13716
13717 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13718 bp->cnic_kwq_prod = bp->cnic_kwq;
13719 else
13720 bp->cnic_kwq_prod++;
13721 }
13722
13723 spin_unlock_bh(&bp->spq_lock);
13724
13725 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13726 bnx2x_cnic_sp_post(bp, 0);
13727
13728 return i;
13729}
13730
13731static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13732{
13733 struct cnic_ops *c_ops;
13734 int rc = 0;
13735
13736 mutex_lock(&bp->cnic_mutex);
13737 c_ops = bp->cnic_ops;
13738 if (c_ops)
13739 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13740 mutex_unlock(&bp->cnic_mutex);
13741
13742 return rc;
13743}
13744
13745static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13746{
13747 struct cnic_ops *c_ops;
13748 int rc = 0;
13749
13750 rcu_read_lock();
13751 c_ops = rcu_dereference(bp->cnic_ops);
13752 if (c_ops)
13753 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13754 rcu_read_unlock();
13755
13756 return rc;
13757}
13758
13759/*
13760 * for commands that have no data
13761 */
13762static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13763{
13764 struct cnic_ctl_info ctl = {0};
13765
13766 ctl.cmd = cmd;
13767
13768 return bnx2x_cnic_ctl_send(bp, &ctl);
13769}
13770
13771static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13772{
13773 struct cnic_ctl_info ctl;
13774
13775 /* first we tell CNIC and only then we count this as a completion */
13776 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13777 ctl.data.comp.cid = cid;
13778
13779 bnx2x_cnic_ctl_send_bh(bp, &ctl);
13780 bnx2x_cnic_sp_post(bp, 1);
13781}
13782
13783static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13784{
13785 struct bnx2x *bp = netdev_priv(dev);
13786 int rc = 0;
13787
13788 switch (ctl->cmd) {
13789 case DRV_CTL_CTXTBL_WR_CMD: {
13790 u32 index = ctl->data.io.offset;
13791 dma_addr_t addr = ctl->data.io.dma_addr;
13792
13793 bnx2x_ilt_wr(bp, index, addr);
13794 break;
13795 }
13796
13797 case DRV_CTL_COMPLETION_CMD: {
13798 int count = ctl->data.comp.comp_count;
13799
13800 bnx2x_cnic_sp_post(bp, count);
13801 break;
13802 }
13803
13804 /* rtnl_lock is held. */
13805 case DRV_CTL_START_L2_CMD: {
13806 u32 cli = ctl->data.ring.client_id;
13807
13808 bp->rx_mode_cl_mask |= (1 << cli);
13809 bnx2x_set_storm_rx_mode(bp);
13810 break;
13811 }
13812
13813 /* rtnl_lock is held. */
13814 case DRV_CTL_STOP_L2_CMD: {
13815 u32 cli = ctl->data.ring.client_id;
13816
13817 bp->rx_mode_cl_mask &= ~(1 << cli);
13818 bnx2x_set_storm_rx_mode(bp);
13819 break;
13820 }
13821
13822 default:
13823 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13824 rc = -EINVAL;
13825 }
13826
13827 return rc;
13828}
13829
13830static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13831{
13832 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13833
13834 if (bp->flags & USING_MSIX_FLAG) {
13835 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13836 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13837 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13838 } else {
13839 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13840 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13841 }
13842 cp->irq_arr[0].status_blk = bp->cnic_sb;
13843 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
13844 cp->irq_arr[1].status_blk = bp->def_status_blk;
13845 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
13846
13847 cp->num_irq = 2;
13848}
13849
13850static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13851 void *data)
13852{
13853 struct bnx2x *bp = netdev_priv(dev);
13854 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13855
13856 if (ops == NULL)
13857 return -EINVAL;
13858
13859 if (atomic_read(&bp->intr_sem) != 0)
13860 return -EBUSY;
13861
13862 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13863 if (!bp->cnic_kwq)
13864 return -ENOMEM;
13865
13866 bp->cnic_kwq_cons = bp->cnic_kwq;
13867 bp->cnic_kwq_prod = bp->cnic_kwq;
13868 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13869
13870 bp->cnic_spq_pending = 0;
13871 bp->cnic_kwq_pending = 0;
13872
13873 bp->cnic_data = data;
13874
13875 cp->num_irq = 0;
13876 cp->drv_state = CNIC_DRV_STATE_REGD;
13877
13878 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
13879
13880 bnx2x_setup_cnic_irq_info(bp);
13881 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
13882 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
13883 rcu_assign_pointer(bp->cnic_ops, ops);
13884
13885 return 0;
13886}
13887
13888static int bnx2x_unregister_cnic(struct net_device *dev)
13889{
13890 struct bnx2x *bp = netdev_priv(dev);
13891 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13892
13893 mutex_lock(&bp->cnic_mutex);
13894 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
13895 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
13896 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
13897 }
13898 cp->drv_state = 0;
13899 rcu_assign_pointer(bp->cnic_ops, NULL);
13900 mutex_unlock(&bp->cnic_mutex);
13901 synchronize_rcu();
13902 kfree(bp->cnic_kwq);
13903 bp->cnic_kwq = NULL;
13904
13905 return 0;
13906}
13907
13908struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13909{
13910 struct bnx2x *bp = netdev_priv(dev);
13911 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13912
13913 cp->drv_owner = THIS_MODULE;
13914 cp->chip_id = CHIP_ID(bp);
13915 cp->pdev = bp->pdev;
13916 cp->io_base = bp->regview;
13917 cp->io_base2 = bp->doorbells;
13918 cp->max_kwqe_pending = 8;
13919 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
13920 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
13921 cp->ctx_tbl_len = CNIC_ILT_LINES;
13922 cp->starting_cid = BCM_CNIC_CID_START;
13923 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13924 cp->drv_ctl = bnx2x_drv_ctl;
13925 cp->drv_register_cnic = bnx2x_register_cnic;
13926 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13927
13928 return cp;
13929}
13930EXPORT_SYMBOL(bnx2x_cnic_probe);
13931
13932#endif /* BCM_CNIC */
13933
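The bnx2x_cnic_sp_queue()/bnx2x_cnic_sp_post() pair above implements a simple hand-off: CNIC work-queue entries are copied into a fixed-size wrap-around buffer under spq_lock, then drained onto the slow-path queue as completion credit frees up. Below is a stand-alone sketch of that producer/consumer pattern; the names (kwq_ring, ring_queue, ring_dequeue) and the ring size are illustrative assumptions, not part of the driver.

	#include <stddef.h>

	#define RING_SIZE 8			/* illustrative size only */

	struct entry {
		int payload;
	};

	struct kwq_ring {
		struct entry buf[RING_SIZE];
		struct entry *prod;		/* next slot to write       */
		struct entry *cons;		/* next slot to read        */
		struct entry *last;		/* &buf[RING_SIZE - 1]      */
		int pending;			/* entries currently queued */
	};

	static void ring_init(struct kwq_ring *r)
	{
		r->prod = r->cons = r->buf;
		r->last = r->buf + RING_SIZE - 1;
		r->pending = 0;
	}

	/* producer side: refuse new entries once the buffer is full */
	static int ring_queue(struct kwq_ring *r, const struct entry *e)
	{
		if (r->pending == RING_SIZE)
			return 0;
		*r->prod = *e;
		r->pending++;
		r->prod = (r->prod == r->last) ? r->buf : r->prod + 1;
		return 1;
	}

	/* consumer side: drain one entry, wrapping the cursor on the last slot */
	static int ring_dequeue(struct kwq_ring *r, struct entry *out)
	{
		if (!r->pending)
			return 0;
		*out = *r->cons;
		r->pending--;
		r->cons = (r->cons == r->last) ? r->buf : r->cons + 1;
		return 1;
	}
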
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
new file mode 100644
index 000000000000..a1f3bf0cd630
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -0,0 +1,5364 @@
1/* bnx2x_reg.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * The registers description starts with the register Access type followed
10 * by size in bits. For example [RW 32]. The access types are:
11 * R - Read only
12 * RC - Clear on read
13 * RW - Read/Write
14 * ST - Statistics register (clear on read)
15 * W - Write only
16 * WB - Wide bus register - the size is over 32 bits and it should be
17 * read/write in consecutive 32 bits accesses
18 * WR - Write Clear (write 1 to clear the bit)
19 *
20 */
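As a concrete reading of the legend above: a register tagged [WB 64] is wider than 32 bits and must be accessed as consecutive 32-bit reads or writes. A minimal sketch follows, assuming a hypothetical reg_wr32() helper in place of the driver's own MMIO accessor and low-dword-first ordering.

	#include <stdint.h>

	/* hypothetical 32-bit register write helper, standing in for the
	 * driver's real MMIO accessor */
	extern void reg_wr32(uint32_t offset, uint32_t val);

	/* write a 64-bit wide-bus (WB) register as two consecutive dword accesses */
	static void reg_wr64_wb(uint32_t offset, uint64_t val)
	{
		reg_wr32(offset, (uint32_t)val);		/* low dword  */
		reg_wr32(offset + 4, (uint32_t)(val >> 32));	/* high dword */
	}
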
21
22
23/* [R 19] Interrupt register #0 read */
24#define BRB1_REG_BRB1_INT_STS 0x6011c
25/* [RW 4] Parity mask register #0 read/write */
26#define BRB1_REG_BRB1_PRTY_MASK 0x60138
27/* [R 4] Parity register #0 read */
28#define BRB1_REG_BRB1_PRTY_STS 0x6012c
29/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
30 address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
31 BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. */
32#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200
33/* [RW 10] The number of free blocks above which the High_llfc signal to
34 interface #n is de-asserted. */
35#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c
36/* [RW 10] The number of free blocks below which the High_llfc signal to
37 interface #n is asserted. */
38#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c
39/* [RW 23] LL RAM data. */
40#define BRB1_REG_LL_RAM 0x61000
41/* [RW 10] The number of free blocks above which the Low_llfc signal to
42 interface #n is de-asserted. */
43#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c
44/* [RW 10] The number of free blocks below which the Low_llfc signal to
45 interface #n is asserted. */
46#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c
47/* [R 24] The number of full blocks. */
48#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090
49/* [ST 32] The number of cycles that the write_full signal towards MAC #0
50 was asserted. */
51#define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8
52#define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc
53#define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8
54/* [ST 32] The number of cycles that the pause signal towards MAC #0 was
55 asserted. */
56#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8
57#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc
58/* [RW 10] Write client 0: De-assert pause threshold. */
59#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
60#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
61/* [RW 10] Write client 0: Assert pause threshold. */
62#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
63#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
64/* [R 24] The number of full blocks occupied by port. */
65#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
66/* [RW 1] Reset the design by software. */
67#define BRB1_REG_SOFT_RESET 0x600dc
68/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
69#define CCM_REG_CAM_OCCUP 0xd0188
70/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
71 acknowledge output is deasserted; all other signals are treated as usual;
72 if 1 - normal activity. */
73#define CCM_REG_CCM_CFC_IFEN 0xd003c
74/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
75 disregarded; valid is deasserted; all other signals are treated as usual;
76 if 1 - normal activity. */
77#define CCM_REG_CCM_CQM_IFEN 0xd000c
78/* [RW 1] If set the Q index; received from the QM is inserted to event ID.
79 Otherwise 0 is inserted. */
80#define CCM_REG_CCM_CQM_USE_Q 0xd00c0
81/* [RW 11] Interrupt mask register #0 read/write */
82#define CCM_REG_CCM_INT_MASK 0xd01e4
83/* [R 11] Interrupt register #0 read */
84#define CCM_REG_CCM_INT_STS 0xd01d8
85/* [R 27] Parity register #0 read */
86#define CCM_REG_CCM_PRTY_STS 0xd01e8
87/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
88 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
89 Is used to determine the number of the AG context REG-pairs written back;
90 when the input message Reg1WbFlg isn't set. */
91#define CCM_REG_CCM_REG0_SZ 0xd00c4
92/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
93 disregarded; valid is deasserted; all other signals are treated as usual;
94 if 1 - normal activity. */
95#define CCM_REG_CCM_STORM0_IFEN 0xd0004
96/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
97 disregarded; valid is deasserted; all other signals are treated as usual;
98 if 1 - normal activity. */
99#define CCM_REG_CCM_STORM1_IFEN 0xd0008
100/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
101 disregarded; valid output is deasserted; all other signals are treated as
102 usual; if 1 - normal activity. */
103#define CCM_REG_CDU_AG_RD_IFEN 0xd0030
104/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
105 are disregarded; all other signals are treated as usual; if 1 - normal
106 activity. */
107#define CCM_REG_CDU_AG_WR_IFEN 0xd002c
108/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
109 disregarded; valid output is deasserted; all other signals are treated as
110 usual; if 1 - normal activity. */
111#define CCM_REG_CDU_SM_RD_IFEN 0xd0038
112/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
113 input is disregarded; all other signals are treated as usual; if 1 -
114 normal activity. */
115#define CCM_REG_CDU_SM_WR_IFEN 0xd0034
116/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
117 the initial credit value; read returns the current value of the credit
118 counter. Must be initialized to 1 at start-up. */
119#define CCM_REG_CFC_INIT_CRD 0xd0204
120/* [RW 2] Auxiliary counter flag Q number 1. */
121#define CCM_REG_CNT_AUX1_Q 0xd00c8
122/* [RW 2] Auxiliary counter flag Q number 2. */
123#define CCM_REG_CNT_AUX2_Q 0xd00cc
124/* [RW 28] The CM header value for QM request (primary). */
125#define CCM_REG_CQM_CCM_HDR_P 0xd008c
126/* [RW 28] The CM header value for QM request (secondary). */
127#define CCM_REG_CQM_CCM_HDR_S 0xd0090
128/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
129 acknowledge output is deasserted; all other signals are treated as usual;
130 if 1 - normal activity. */
131#define CCM_REG_CQM_CCM_IFEN 0xd0014
132/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
133 the initial credit value; read returns the current value of the credit
134 counter. Must be initialized to 32 at start-up. */
135#define CCM_REG_CQM_INIT_CRD 0xd020c
136/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
137 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
138 prioritised); 2 stands for weight 2; etc. */
139#define CCM_REG_CQM_P_WEIGHT 0xd00b8
140/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
141 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
142 prioritised); 2 stands for weight 2; etc. */
143#define CCM_REG_CQM_S_WEIGHT 0xd00bc
144/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
145 acknowledge output is deasserted; all other signals are treated as usual;
146 if 1 - normal activity. */
147#define CCM_REG_CSDM_IFEN 0xd0018
148/* [RC 1] Set when the message length mismatch (relative to last indication)
149 at the SDM interface is detected. */
150#define CCM_REG_CSDM_LENGTH_MIS 0xd0170
151/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
152 weight 8 (the most prioritised); 1 stands for weight 1 (least
153 prioritised); 2 stands for weight 2; etc. */
154#define CCM_REG_CSDM_WEIGHT 0xd00b4
155/* [RW 28] The CM header for QM formatting in case of an error in the QM
156 inputs. */
157#define CCM_REG_ERR_CCM_HDR 0xd0094
158/* [RW 8] The Event ID in case the input message ErrorFlg is set. */
159#define CCM_REG_ERR_EVNT_ID 0xd0098
160/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
161 writes the initial credit value; read returns the current value of the
162 credit counter. Must be initialized to 64 at start-up. */
163#define CCM_REG_FIC0_INIT_CRD 0xd0210
164/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
165 writes the initial credit value; read returns the current value of the
166 credit counter. Must be initialized to 64 at start-up. */
167#define CCM_REG_FIC1_INIT_CRD 0xd0214
168/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
169 - strict priority defined by ~ccm_registers_gr_ag_pr.gr_ag_pr;
170 ~ccm_registers_gr_ld0_pr.gr_ld0_pr and
171 ~ccm_registers_gr_ld1_pr.gr_ld1_pr. Groups are according to channels and
172 outputs to STORM: aggregation; load FIC0; load FIC1 and store. */
173#define CCM_REG_GR_ARB_TYPE 0xd015c
174/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
175 highest priority is 3. It is assumed that the Store channel priority is
176 the complement to 4 of the remaining priorities - Aggregation channel; Load
177 (FIC0) channel and Load (FIC1). */
178#define CCM_REG_GR_LD0_PR 0xd0164
179/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
180 highest priority is 3. It is assumed that the Store channel priority is
181 the complement to 4 of the remaining priorities - Aggregation channel; Load
182 (FIC0) channel and Load (FIC1). */
183#define CCM_REG_GR_LD1_PR 0xd0168
184/* [RW 2] General flags index. */
185#define CCM_REG_INV_DONE_Q 0xd0108
186/* [RW 4] The number of double REG-pairs (128 bits); loaded from the STORM
187 context and sent to STORM; for a specific connection type. The double
188 REG-pairs are used in order to align to STORM context row size of 128
189 bits. The offset of these data in the STORM context is always 0. Index
190 _(0..15) stands for the connection type (one of 16). */
191#define CCM_REG_N_SM_CTX_LD_0 0xd004c
192#define CCM_REG_N_SM_CTX_LD_1 0xd0050
193#define CCM_REG_N_SM_CTX_LD_2 0xd0054
194#define CCM_REG_N_SM_CTX_LD_3 0xd0058
195#define CCM_REG_N_SM_CTX_LD_4 0xd005c
196/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
197 acknowledge output is deasserted; all other signals are treated as usual;
198 if 1 - normal activity. */
199#define CCM_REG_PBF_IFEN 0xd0028
200/* [RC 1] Set when the message length mismatch (relative to last indication)
201 at the pbf interface is detected. */
202#define CCM_REG_PBF_LENGTH_MIS 0xd0180
203/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
204 weight 8 (the most prioritised); 1 stands for weight 1 (least
205 prioritised); 2 stands for weight 2; etc. */
206#define CCM_REG_PBF_WEIGHT 0xd00ac
207#define CCM_REG_PHYS_QNUM1_0 0xd0134
208#define CCM_REG_PHYS_QNUM1_1 0xd0138
209#define CCM_REG_PHYS_QNUM2_0 0xd013c
210#define CCM_REG_PHYS_QNUM2_1 0xd0140
211#define CCM_REG_PHYS_QNUM3_0 0xd0144
212#define CCM_REG_PHYS_QNUM3_1 0xd0148
213#define CCM_REG_QOS_PHYS_QNUM0_0 0xd0114
214#define CCM_REG_QOS_PHYS_QNUM0_1 0xd0118
215#define CCM_REG_QOS_PHYS_QNUM1_0 0xd011c
216#define CCM_REG_QOS_PHYS_QNUM1_1 0xd0120
217#define CCM_REG_QOS_PHYS_QNUM2_0 0xd0124
218#define CCM_REG_QOS_PHYS_QNUM2_1 0xd0128
219#define CCM_REG_QOS_PHYS_QNUM3_0 0xd012c
220#define CCM_REG_QOS_PHYS_QNUM3_1 0xd0130
221/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
222 disregarded; acknowledge output is deasserted; all other signals are
223 treated as usual; if 1 - normal activity. */
224#define CCM_REG_STORM_CCM_IFEN 0xd0010
225/* [RC 1] Set when the message length mismatch (relative to last indication)
226 at the STORM interface is detected. */
227#define CCM_REG_STORM_LENGTH_MIS 0xd016c
228/* [RW 3] The weight of the STORM input in the WRR (Weighted Round robin)
229 mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for
230 weight 1 (least prioritised); 2 stands for weight 2 (more prioritised);
231 etc. */
232#define CCM_REG_STORM_WEIGHT 0xd009c
233/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
234 disregarded; acknowledge output is deasserted; all other signals are
235 treated as usual; if 1 - normal activity. */
236#define CCM_REG_TSEM_IFEN 0xd001c
237/* [RC 1] Set when the message length mismatch (relative to last indication)
238 at the tsem interface is detected. */
239#define CCM_REG_TSEM_LENGTH_MIS 0xd0174
240/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
241 weight 8 (the most prioritised); 1 stands for weight 1 (least
242 prioritised); 2 stands for weight 2; etc. */
243#define CCM_REG_TSEM_WEIGHT 0xd00a0
244/* [RW 1] Input usem Interface enable. If 0 - the valid input is
245 disregarded; acknowledge output is deasserted; all other signals are
246 treated as usual; if 1 - normal activity. */
247#define CCM_REG_USEM_IFEN 0xd0024
248/* [RC 1] Set when message length mismatch (relative to last indication) at
249 the usem interface is detected. */
250#define CCM_REG_USEM_LENGTH_MIS 0xd017c
251/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
252 weight 8 (the most prioritised); 1 stands for weight 1 (least
253 prioritised); 2 stands for weight 2; etc. */
254#define CCM_REG_USEM_WEIGHT 0xd00a8
255/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
256 disregarded; acknowledge output is deasserted; all other signals are
257 treated as usual; if 1 - normal activity. */
258#define CCM_REG_XSEM_IFEN 0xd0020
259/* [RC 1] Set when the message length mismatch (relative to last indication)
260 at the xsem interface is detected. */
261#define CCM_REG_XSEM_LENGTH_MIS 0xd0178
262/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
263 weight 8 (the most prioritised); 1 stands for weight 1 (least
264 prioritised); 2 stands for weight 2; etc. */
265#define CCM_REG_XSEM_WEIGHT 0xd00a4
266/* [RW 19] Indirect access to the descriptor table of the XX protection
267 mechanism. The fields are: [5:0] - message length; [12:6] - message
268 pointer; [18:13] - next pointer. */
269#define CCM_REG_XX_DESCR_TABLE 0xd0300
270#define CCM_REG_XX_DESCR_TABLE_SIZE 36
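
The CCM_REG_XX_DESCR_TABLE entry described above packs three fields into one 19-bit word: [5:0] message length, [12:6] message pointer, [18:13] next pointer. A hedged sketch of unpacking such a word with shifts and masks follows; the struct and function names are illustrative and not part of the driver.

	#include <stdint.h>

	struct xx_descr {
		uint8_t msg_len;	/* bits [5:0],   6 bits */
		uint8_t msg_ptr;	/* bits [12:6],  7 bits */
		uint8_t next_ptr;	/* bits [18:13], 6 bits */
	};

	static struct xx_descr xx_descr_decode(uint32_t raw)
	{
		struct xx_descr d;

		d.msg_len  = raw & 0x3f;
		d.msg_ptr  = (raw >> 6) & 0x7f;
		d.next_ptr = (raw >> 13) & 0x3f;
		return d;
	}
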
271/* [R 7] Used to read the value of XX protection Free counter. */
272#define CCM_REG_XX_FREE 0xd0184
273/* [RW 6] Initial value for the credit counter; responsible for fulfilling
274 of the Input Stage XX protection buffer by the XX protection pending
275 messages. Max credit available - 127. Write writes the initial credit
276 value; read returns the current value of the credit counter. Must be
277 initialized to maximum XX protected message size - 2 at start-up. */
278#define CCM_REG_XX_INIT_CRD 0xd0220
279/* [RW 7] The maximum number of pending messages; which may be stored in XX
280 protection. At read the ~ccm_registers_xx_free.xx_free counter is read.
281 At write comprises the start value of the ~ccm_registers_xx_free.xx_free
282 counter. */
283#define CCM_REG_XX_MSG_NUM 0xd0224
284/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
285#define CCM_REG_XX_OVFL_EVNT_ID 0xd0044
286/* [RW 18] Indirect access to the XX table of the XX protection mechanism.
287 The fields are: [5:0] - tail pointer; [11:6] - Link List size; [17:12] -
288 header pointer. */
289#define CCM_REG_XX_TABLE 0xd0280
290#define CDU_REG_CDU_CHK_MASK0 0x101000
291#define CDU_REG_CDU_CHK_MASK1 0x101004
292#define CDU_REG_CDU_CONTROL0 0x101008
293#define CDU_REG_CDU_DEBUG 0x101010
294#define CDU_REG_CDU_GLOBAL_PARAMS 0x101020
295/* [RW 7] Interrupt mask register #0 read/write */
296#define CDU_REG_CDU_INT_MASK 0x10103c
297/* [R 7] Interrupt register #0 read */
298#define CDU_REG_CDU_INT_STS 0x101030
299/* [RW 5] Parity mask register #0 read/write */
300#define CDU_REG_CDU_PRTY_MASK 0x10104c
301/* [R 5] Parity register #0 read */
302#define CDU_REG_CDU_PRTY_STS 0x101040
303/* [RC 32] logging of error data in case of a CDU load error:
304 {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0]; active_error;
305 type_error; actual_active; actual_compressed_context}; */
306#define CDU_REG_ERROR_DATA 0x101014
307/* [WB 216] L1TT ram access. each entry has the following format :
308 {merge_regions[7:0]; offset12[5:0]...offset0[5:0];
309 length12[5:0]...length0[5:0]; id12[3:0]...id0[3:0]} */
310#define CDU_REG_L1TT 0x101800
311/* [WB 24] MATT ram access. each entry has the following
312 format: {RegionLength[11:0]; RegionOffset[11:0]} */
313#define CDU_REG_MATT 0x101100
314/* [RW 1] when this bit is set the CDU operates in e1hmf mode */
315#define CDU_REG_MF_MODE 0x101050
316/* [R 1] indication that the hardware has finished initializing the activity
317 counter. */
318#define CFC_REG_AC_INIT_DONE 0x104078
319/* [RW 13] activity counter ram access */
320#define CFC_REG_ACTIVITY_COUNTER 0x104400
321#define CFC_REG_ACTIVITY_COUNTER_SIZE 256
322/* [R 1] indication that the hardware has finished initializing the CAMs. */
323#define CFC_REG_CAM_INIT_DONE 0x10407c
324/* [RW 2] Interrupt mask register #0 read/write */
325#define CFC_REG_CFC_INT_MASK 0x104108
326/* [R 2] Interrupt register #0 read */
327#define CFC_REG_CFC_INT_STS 0x1040fc
328/* [RC 2] Interrupt register #0 read clear */
329#define CFC_REG_CFC_INT_STS_CLR 0x104100
330/* [RW 4] Parity mask register #0 read/write */
331#define CFC_REG_CFC_PRTY_MASK 0x104118
332/* [R 4] Parity register #0 read */
333#define CFC_REG_CFC_PRTY_STS 0x10410c
334/* [RW 21] CID cam access (21:1 - Data; Valid - 0) */
335#define CFC_REG_CID_CAM 0x104800
336#define CFC_REG_CONTROL0 0x104028
337#define CFC_REG_DEBUG0 0x104050
338/* [RW 14] indicates per error (in #cfc_registers_cfc_error_vector.cfc_error
339 vector) whether the cfc should be disabled upon it */
340#define CFC_REG_DISABLE_ON_ERROR 0x104044
341/* [RC 14] CFC error vector. when the CFC detects an internal error it will
342 set one of these bits. the bit description can be found in CFC
343 specifications */
344#define CFC_REG_ERROR_VECTOR 0x10403c
345/* [WB 93] LCID info ram access */
346#define CFC_REG_INFO_RAM 0x105000
347#define CFC_REG_INFO_RAM_SIZE 1024
348#define CFC_REG_INIT_REG 0x10404c
349#define CFC_REG_INTERFACES 0x104058
350/* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. this
351 field allows changing the priorities of the weighted-round-robin arbiter
352 which selects which CFC load client should be served next */
353#define CFC_REG_LCREQ_WEIGHTS 0x104084
354/* [RW 16] Link List ram access; data = {prev_lcid; next_lcid} */
355#define CFC_REG_LINK_LIST 0x104c00
356#define CFC_REG_LINK_LIST_SIZE 256
357/* [R 1] indication that the hardware has finished initializing the link list. */
358#define CFC_REG_LL_INIT_DONE 0x104074
359/* [R 9] Number of allocated LCIDs which are at empty state */
360#define CFC_REG_NUM_LCIDS_ALLOC 0x104020
361/* [R 9] Number of Arriving LCIDs in Link List Block */
362#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
363/* [R 9] Number of Leaving LCIDs in Link List Block */
364#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
365/* [RW 8] The event id for aggregated interrupt 0 */
366#define CSDM_REG_AGG_INT_EVENT_0 0xc2038
367#define CSDM_REG_AGG_INT_EVENT_10 0xc2060
368#define CSDM_REG_AGG_INT_EVENT_11 0xc2064
369#define CSDM_REG_AGG_INT_EVENT_12 0xc2068
370#define CSDM_REG_AGG_INT_EVENT_13 0xc206c
371#define CSDM_REG_AGG_INT_EVENT_14 0xc2070
372#define CSDM_REG_AGG_INT_EVENT_15 0xc2074
373#define CSDM_REG_AGG_INT_EVENT_16 0xc2078
374#define CSDM_REG_AGG_INT_EVENT_2 0xc2040
375#define CSDM_REG_AGG_INT_EVENT_3 0xc2044
376#define CSDM_REG_AGG_INT_EVENT_4 0xc2048
377#define CSDM_REG_AGG_INT_EVENT_5 0xc204c
378#define CSDM_REG_AGG_INT_EVENT_6 0xc2050
379#define CSDM_REG_AGG_INT_EVENT_7 0xc2054
380#define CSDM_REG_AGG_INT_EVENT_8 0xc2058
381#define CSDM_REG_AGG_INT_EVENT_9 0xc205c
382/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
383 or auto-mask-mode (1) */
384#define CSDM_REG_AGG_INT_MODE_10 0xc21e0
385#define CSDM_REG_AGG_INT_MODE_11 0xc21e4
386#define CSDM_REG_AGG_INT_MODE_12 0xc21e8
387#define CSDM_REG_AGG_INT_MODE_13 0xc21ec
388#define CSDM_REG_AGG_INT_MODE_14 0xc21f0
389#define CSDM_REG_AGG_INT_MODE_15 0xc21f4
390#define CSDM_REG_AGG_INT_MODE_16 0xc21f8
391#define CSDM_REG_AGG_INT_MODE_6 0xc21d0
392#define CSDM_REG_AGG_INT_MODE_7 0xc21d4
393#define CSDM_REG_AGG_INT_MODE_8 0xc21d8
394#define CSDM_REG_AGG_INT_MODE_9 0xc21dc
395/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
396#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008
397/* [RW 16] The maximum value of the completion counter #0 */
398#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c
399/* [RW 16] The maximum value of the completion counter #1 */
400#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020
401/* [RW 16] The maximum value of the completion counter #2 */
402#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024
403/* [RW 16] The maximum value of the completion counter #3 */
404#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028
405/* [RW 13] The start address in the internal RAM for the completion
406 counters. */
407#define CSDM_REG_CMP_COUNTER_START_ADDR 0xc200c
408/* [RW 32] Interrupt mask register #0 read/write */
409#define CSDM_REG_CSDM_INT_MASK_0 0xc229c
410#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac
411/* [R 32] Interrupt register #0 read */
412#define CSDM_REG_CSDM_INT_STS_0 0xc2290
413#define CSDM_REG_CSDM_INT_STS_1 0xc22a0
414/* [RW 11] Parity mask register #0 read/write */
415#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
416/* [R 11] Parity register #0 read */
417#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
418#define CSDM_REG_ENABLE_IN1 0xc2238
419#define CSDM_REG_ENABLE_IN2 0xc223c
420#define CSDM_REG_ENABLE_OUT1 0xc2240
421#define CSDM_REG_ENABLE_OUT2 0xc2244
422/* [RW 4] The initial number of messages that can be sent to the pxp control
423 interface without receiving any ACK. */
424#define CSDM_REG_INIT_CREDIT_PXP_CTRL 0xc24bc
425/* [ST 32] The number of ACK after placement messages received */
426#define CSDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc227c
427/* [ST 32] The number of packet end messages received from the parser */
428#define CSDM_REG_NUM_OF_PKT_END_MSG 0xc2274
429/* [ST 32] The number of requests received from the pxp async if */
430#define CSDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc2278
431/* [ST 32] The number of commands received in queue 0 */
432#define CSDM_REG_NUM_OF_Q0_CMD 0xc2248
433/* [ST 32] The number of commands received in queue 10 */
434#define CSDM_REG_NUM_OF_Q10_CMD 0xc226c
435/* [ST 32] The number of commands received in queue 11 */
436#define CSDM_REG_NUM_OF_Q11_CMD 0xc2270
437/* [ST 32] The number of commands received in queue 1 */
438#define CSDM_REG_NUM_OF_Q1_CMD 0xc224c
439/* [ST 32] The number of commands received in queue 3 */
440#define CSDM_REG_NUM_OF_Q3_CMD 0xc2250
441/* [ST 32] The number of commands received in queue 4 */
442#define CSDM_REG_NUM_OF_Q4_CMD 0xc2254
443/* [ST 32] The number of commands received in queue 5 */
444#define CSDM_REG_NUM_OF_Q5_CMD 0xc2258
445/* [ST 32] The number of commands received in queue 6 */
446#define CSDM_REG_NUM_OF_Q6_CMD 0xc225c
447/* [ST 32] The number of commands received in queue 7 */
448#define CSDM_REG_NUM_OF_Q7_CMD 0xc2260
449/* [ST 32] The number of commands received in queue 8 */
450#define CSDM_REG_NUM_OF_Q8_CMD 0xc2264
451/* [ST 32] The number of commands received in queue 9 */
452#define CSDM_REG_NUM_OF_Q9_CMD 0xc2268
453/* [RW 13] The start address in the internal RAM for queue counters */
454#define CSDM_REG_Q_COUNTER_START_ADDR 0xc2010
455/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
456#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc2548
457/* [R 1] parser fifo empty in sdm_sync block */
458#define CSDM_REG_SYNC_PARSER_EMPTY 0xc2550
459/* [R 1] parser serial fifo empty in sdm_sync block */
460#define CSDM_REG_SYNC_SYNC_EMPTY 0xc2558
461/* [RW 32] Tick for timer counter. Applicable only when
462 ~csdm_registers_timer_tick_enable.timer_tick_enable =1 */
463#define CSDM_REG_TIMER_TICK 0xc2000
464/* [RW 5] The number of time_slots in the arbitration cycle */
465#define CSEM_REG_ARB_CYCLE_SIZE 0x200034
466/* [RW 3] The source that is associated with arbitration element 0. Source
467 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
468 sleeping thread with priority 1; 4- sleeping thread with priority 2 */
469#define CSEM_REG_ARB_ELEMENT0 0x200020
470/* [RW 3] The source that is associated with arbitration element 1. Source
471 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
472 sleeping thread with priority 1; 4- sleeping thread with priority 2.
473 Could not be equal to register ~csem_registers_arb_element0.arb_element0 */
474#define CSEM_REG_ARB_ELEMENT1 0x200024
475/* [RW 3] The source that is associated with arbitration element 2. Source
476 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
477 sleeping thread with priority 1; 4- sleeping thread with priority 2.
478 Could not be equal to register ~csem_registers_arb_element0.arb_element0
479 and ~csem_registers_arb_element1.arb_element1 */
480#define CSEM_REG_ARB_ELEMENT2 0x200028
481/* [RW 3] The source that is associated with arbitration element 3. Source
482 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
483 sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
484 not be equal to register ~csem_registers_arb_element0.arb_element0 and
485 ~csem_registers_arb_element1.arb_element1 and
486 ~csem_registers_arb_element2.arb_element2 */
487#define CSEM_REG_ARB_ELEMENT3 0x20002c
488/* [RW 3] The source that is associated with arbitration element 4. Source
489 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
490 sleeping thread with priority 1; 4- sleeping thread with priority 2.
491 Could not be equal to register ~csem_registers_arb_element0.arb_element0
492 and ~csem_registers_arb_element1.arb_element1 and
493 ~csem_registers_arb_element2.arb_element2 and
494 ~csem_registers_arb_element3.arb_element3 */
495#define CSEM_REG_ARB_ELEMENT4 0x200030
496/* [RW 32] Interrupt mask register #0 read/write */
497#define CSEM_REG_CSEM_INT_MASK_0 0x200110
498#define CSEM_REG_CSEM_INT_MASK_1 0x200120
499/* [R 32] Interrupt register #0 read */
500#define CSEM_REG_CSEM_INT_STS_0 0x200104
501#define CSEM_REG_CSEM_INT_STS_1 0x200114
502/* [RW 32] Parity mask register #0 read/write */
503#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130
504#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140
505/* [R 32] Parity register #0 read */
506#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
507#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
508#define CSEM_REG_ENABLE_IN 0x2000a4
509#define CSEM_REG_ENABLE_OUT 0x2000a8
510/* [RW 32] This address space contains all registers and memories that are
511 placed in SEM_FAST block. The SEM_FAST registers are described in
512 appendix B. In order to access the sem_fast registers the base address
513 ~fast_memory.fast_memory should be added to each sem_fast register offset. */
514#define CSEM_REG_FAST_MEMORY 0x220000
515/* [RW 1] Disables input messages from FIC0 May be updated during run_time
516 by the microcode */
517#define CSEM_REG_FIC0_DISABLE 0x200224
518/* [RW 1] Disables input messages from FIC1 May be updated during run_time
519 by the microcode */
520#define CSEM_REG_FIC1_DISABLE 0x200234
521/* [RW 15] Interrupt table. Read and write access to it is not possible in
522 the middle of the work */
523#define CSEM_REG_INT_TABLE 0x200400
524/* [ST 24] Statistics register. The number of messages that entered through
525 FIC0 */
526#define CSEM_REG_MSG_NUM_FIC0 0x200000
527/* [ST 24] Statistics register. The number of messages that entered through
528 FIC1 */
529#define CSEM_REG_MSG_NUM_FIC1 0x200004
530/* [ST 24] Statistics register. The number of messages that were sent to
531 FOC0 */
532#define CSEM_REG_MSG_NUM_FOC0 0x200008
533/* [ST 24] Statistics register. The number of messages that were sent to
534 FOC1 */
535#define CSEM_REG_MSG_NUM_FOC1 0x20000c
536/* [ST 24] Statistics register. The number of messages that were sent to
537 FOC2 */
538#define CSEM_REG_MSG_NUM_FOC2 0x200010
539/* [ST 24] Statistics register. The number of messages that were sent to
540 FOC3 */
541#define CSEM_REG_MSG_NUM_FOC3 0x200014
542/* [RW 1] Disables input messages from the passive buffer May be updated
543 during run_time by the microcode */
544#define CSEM_REG_PAS_DISABLE 0x20024c
545/* [WB 128] Debug only. Passive buffer memory */
546#define CSEM_REG_PASSIVE_BUFFER 0x202000
547/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
548#define CSEM_REG_PRAM 0x240000
549/* [R 16] Valid sleeping threads indication have bit per thread */
550#define CSEM_REG_SLEEP_THREADS_VALID 0x20026c
551/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
552#define CSEM_REG_SLOW_EXT_STORE_EMPTY 0x2002a0
553/* [RW 16] List of free threads . There is a bit per thread. */
554#define CSEM_REG_THREADS_LIST 0x2002e4
555/* [RW 3] The arbitration scheme of time_slot 0 */
556#define CSEM_REG_TS_0_AS 0x200038
557/* [RW 3] The arbitration scheme of time_slot 10 */
558#define CSEM_REG_TS_10_AS 0x200060
559/* [RW 3] The arbitration scheme of time_slot 11 */
560#define CSEM_REG_TS_11_AS 0x200064
561/* [RW 3] The arbitration scheme of time_slot 12 */
562#define CSEM_REG_TS_12_AS 0x200068
563/* [RW 3] The arbitration scheme of time_slot 13 */
564#define CSEM_REG_TS_13_AS 0x20006c
565/* [RW 3] The arbitration scheme of time_slot 14 */
566#define CSEM_REG_TS_14_AS 0x200070
567/* [RW 3] The arbitration scheme of time_slot 15 */
568#define CSEM_REG_TS_15_AS 0x200074
569/* [RW 3] The arbitration scheme of time_slot 16 */
570#define CSEM_REG_TS_16_AS 0x200078
571/* [RW 3] The arbitration scheme of time_slot 17 */
572#define CSEM_REG_TS_17_AS 0x20007c
573/* [RW 3] The arbitration scheme of time_slot 18 */
574#define CSEM_REG_TS_18_AS 0x200080
575/* [RW 3] The arbitration scheme of time_slot 1 */
576#define CSEM_REG_TS_1_AS 0x20003c
577/* [RW 3] The arbitration scheme of time_slot 2 */
578#define CSEM_REG_TS_2_AS 0x200040
579/* [RW 3] The arbitration scheme of time_slot 3 */
580#define CSEM_REG_TS_3_AS 0x200044
581/* [RW 3] The arbitration scheme of time_slot 4 */
582#define CSEM_REG_TS_4_AS 0x200048
583/* [RW 3] The arbitration scheme of time_slot 5 */
584#define CSEM_REG_TS_5_AS 0x20004c
585/* [RW 3] The arbitration scheme of time_slot 6 */
586#define CSEM_REG_TS_6_AS 0x200050
587/* [RW 3] The arbitration scheme of time_slot 7 */
588#define CSEM_REG_TS_7_AS 0x200054
589/* [RW 3] The arbitration scheme of time_slot 8 */
590#define CSEM_REG_TS_8_AS 0x200058
591/* [RW 3] The arbitration scheme of time_slot 9 */
592#define CSEM_REG_TS_9_AS 0x20005c
593/* [RW 1] Parity mask register #0 read/write */
594#define DBG_REG_DBG_PRTY_MASK 0xc0a8
595/* [R 1] Parity register #0 read */
596#define DBG_REG_DBG_PRTY_STS 0xc09c
597/* [RW 32] Commands memory. The address of command X; row Y is calculated
598 as 14*X+Y. */
599#define DMAE_REG_CMD_MEM 0x102400
600#define DMAE_REG_CMD_MEM_SIZE 224
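
For example, row 2 of command 3 sits at word index 14*3 + 2 = 44 within DMAE_REG_CMD_MEM, and DMAE_REG_CMD_MEM_SIZE = 224 covers 16 commands of 14 rows each.
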
601/* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c
602 initial value is all ones. */
603#define DMAE_REG_CRC16C_INIT 0x10201c
604/* [RW 1] If 0 - the CRC-16 T10 initial value is all zeroes; if 1 - the
605 CRC-16 T10 initial value is all ones. */
606#define DMAE_REG_CRC16T10_INIT 0x102020
607/* [RW 2] Interrupt mask register #0 read/write */
608#define DMAE_REG_DMAE_INT_MASK 0x102054
609/* [RW 4] Parity mask register #0 read/write */
610#define DMAE_REG_DMAE_PRTY_MASK 0x102064
611/* [R 4] Parity register #0 read */
612#define DMAE_REG_DMAE_PRTY_STS 0x102058
613/* [RW 1] Command 0 go. */
614#define DMAE_REG_GO_C0 0x102080
615/* [RW 1] Command 1 go. */
616#define DMAE_REG_GO_C1 0x102084
617/* [RW 1] Command 10 go. */
618#define DMAE_REG_GO_C10 0x102088
619/* [RW 1] Command 11 go. */
620#define DMAE_REG_GO_C11 0x10208c
621/* [RW 1] Command 12 go. */
622#define DMAE_REG_GO_C12 0x102090
623/* [RW 1] Command 13 go. */
624#define DMAE_REG_GO_C13 0x102094
625/* [RW 1] Command 14 go. */
626#define DMAE_REG_GO_C14 0x102098
627/* [RW 1] Command 15 go. */
628#define DMAE_REG_GO_C15 0x10209c
629/* [RW 1] Command 2 go. */
630#define DMAE_REG_GO_C2 0x1020a0
631/* [RW 1] Command 3 go. */
632#define DMAE_REG_GO_C3 0x1020a4
633/* [RW 1] Command 4 go. */
634#define DMAE_REG_GO_C4 0x1020a8
635/* [RW 1] Command 5 go. */
636#define DMAE_REG_GO_C5 0x1020ac
637/* [RW 1] Command 6 go. */
638#define DMAE_REG_GO_C6 0x1020b0
639/* [RW 1] Command 7 go. */
640#define DMAE_REG_GO_C7 0x1020b4
641/* [RW 1] Command 8 go. */
642#define DMAE_REG_GO_C8 0x1020b8
643/* [RW 1] Command 9 go. */
644#define DMAE_REG_GO_C9 0x1020bc
645/* [RW 1] DMAE GRC Interface (Target; Master) enable. If 0 - the acknowledge
646 input is disregarded; valid is deasserted; all other signals are treated
647 as usual; if 1 - normal activity. */
648#define DMAE_REG_GRC_IFEN 0x102008
649/* [RW 1] DMAE PCI Interface (Request; Read; Write) enable. If 0 - the
650 acknowledge input is disregarded; valid is deasserted; full is asserted;
651 all other signals are treated as usual; if 1 - normal activity. */
652#define DMAE_REG_PCI_IFEN 0x102004
653/* [RW 4] DMAE- PCI Request Interface initial credit. Write writes the
654 initial value to the credit counter; related to the address. Read returns
655 the current value of the counter. */
656#define DMAE_REG_PXP_REQ_INIT_CRD 0x1020c0
657/* [RW 8] Aggregation command. */
658#define DORQ_REG_AGG_CMD0 0x170060
659/* [RW 8] Aggregation command. */
660#define DORQ_REG_AGG_CMD1 0x170064
661/* [RW 8] Aggregation command. */
662#define DORQ_REG_AGG_CMD2 0x170068
663/* [RW 8] Aggregation command. */
664#define DORQ_REG_AGG_CMD3 0x17006c
665/* [RW 28] UCM Header. */
666#define DORQ_REG_CMHEAD_RX 0x170050
667/* [RW 32] Doorbell address for RBC doorbells (function 0). */
668#define DORQ_REG_DB_ADDR0 0x17008c
669/* [RW 5] Interrupt mask register #0 read/write */
670#define DORQ_REG_DORQ_INT_MASK 0x170180
671/* [R 5] Interrupt register #0 read */
672#define DORQ_REG_DORQ_INT_STS 0x170174
673/* [RC 5] Interrupt register #0 read clear */
674#define DORQ_REG_DORQ_INT_STS_CLR 0x170178
675/* [RW 2] Parity mask register #0 read/write */
676#define DORQ_REG_DORQ_PRTY_MASK 0x170190
677/* [R 2] Parity register #0 read */
678#define DORQ_REG_DORQ_PRTY_STS 0x170184
679/* [RW 8] The address to write the DPM CID to STORM. */
680#define DORQ_REG_DPM_CID_ADDR 0x170044
681/* [RW 5] The DPM mode CID extraction offset. */
682#define DORQ_REG_DPM_CID_OFST 0x170030
683/* [RW 12] The threshold of the DQ FIFO to send the almost full interrupt. */
684#define DORQ_REG_DQ_FIFO_AFULL_TH 0x17007c
685/* [RW 12] The threshold of the DQ FIFO to send the full interrupt. */
686#define DORQ_REG_DQ_FIFO_FULL_TH 0x170078
687/* [R 13] Current value of the DQ FIFO fill level according to following
688 pointer. The range is 0 - 256 FIFO rows; where each row stands for the
689 doorbell. */
690#define DORQ_REG_DQ_FILL_LVLF 0x1700a4
691/* [R 1] DQ FIFO full status. Is set when the FIFO filling level is greater
692 than or equal to the full threshold; reset on full clear. */
693#define DORQ_REG_DQ_FULL_ST 0x1700c0
694/* [RW 28] The value sent to CM header in the case of CFC load error. */
695#define DORQ_REG_ERR_CMHEAD 0x170058
696#define DORQ_REG_IF_EN 0x170004
697#define DORQ_REG_MODE_ACT 0x170008
698/* [RW 5] The normal mode CID extraction offset. */
699#define DORQ_REG_NORM_CID_OFST 0x17002c
700/* [RW 28] TCM Header when only TCP context is loaded. */
701#define DORQ_REG_NORM_CMHEAD_TX 0x17004c
702/* [RW 3] The number of simultaneous outstanding requests to Context Fetch
703 Interface. */
704#define DORQ_REG_OUTST_REQ 0x17003c
705#define DORQ_REG_REGN 0x170038
706/* [R 4] Current value of response A counter credit. Initial credit is
707 configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
708 register. */
709#define DORQ_REG_RSPA_CRD_CNT 0x1700ac
710/* [R 4] Current value of response B counter credit. Initial credit is
711 configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
712 register. */
713#define DORQ_REG_RSPB_CRD_CNT 0x1700b0
714/* [RW 4] The initial credit at the Doorbell Response Interface. The write
715 writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The
716 read reads this written value. */
717#define DORQ_REG_RSP_INIT_CRD 0x170048
718/* [RW 4] Initial activity counter value on the load request; when the
719 shortcut is done. */
720#define DORQ_REG_SHRT_ACT_CNT 0x170070
721/* [RW 28] TCM Header when both ULP and TCP context is loaded. */
722#define DORQ_REG_SHRT_CMHEAD 0x170054
723#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4)
724#define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3)
725#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7)
726#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2)
727#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
728#define HC_REG_AGG_INT_0 0x108050
729#define HC_REG_AGG_INT_1 0x108054
730#define HC_REG_ATTN_BIT 0x108120
731#define HC_REG_ATTN_IDX 0x108100
732#define HC_REG_ATTN_MSG0_ADDR_L 0x108018
733#define HC_REG_ATTN_MSG1_ADDR_L 0x108020
734#define HC_REG_ATTN_NUM_P0 0x108038
735#define HC_REG_ATTN_NUM_P1 0x10803c
736#define HC_REG_COMMAND_REG 0x108180
737#define HC_REG_CONFIG_0 0x108000
738#define HC_REG_CONFIG_1 0x108004
739#define HC_REG_FUNC_NUM_P0 0x1080ac
740#define HC_REG_FUNC_NUM_P1 0x1080b0
741/* [RW 3] Parity mask register #0 read/write */
742#define HC_REG_HC_PRTY_MASK 0x1080a0
743/* [R 3] Parity register #0 read */
744#define HC_REG_HC_PRTY_STS 0x108094
745#define HC_REG_INT_MASK 0x108108
746#define HC_REG_LEADING_EDGE_0 0x108040
747#define HC_REG_LEADING_EDGE_1 0x108048
748#define HC_REG_P0_PROD_CONS 0x108200
749#define HC_REG_P1_PROD_CONS 0x108400
750#define HC_REG_PBA_COMMAND 0x108140
751#define HC_REG_PCI_CONFIG_0 0x108010
752#define HC_REG_PCI_CONFIG_1 0x108014
753#define HC_REG_STATISTIC_COUNTERS 0x109000
754#define HC_REG_TRAILING_EDGE_0 0x108044
755#define HC_REG_TRAILING_EDGE_1 0x10804c
756#define HC_REG_UC_RAM_ADDR_0 0x108028
757#define HC_REG_UC_RAM_ADDR_1 0x108030
758#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068
759#define HC_REG_VQID_0 0x108008
760#define HC_REG_VQID_1 0x10800c
761#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424
762#define MCP_REG_MCPR_NVM_ADDR 0x8640c
763#define MCP_REG_MCPR_NVM_CFG4 0x8642c
764#define MCP_REG_MCPR_NVM_COMMAND 0x86400
765#define MCP_REG_MCPR_NVM_READ 0x86410
766#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
767#define MCP_REG_MCPR_NVM_WRITE 0x86408
768#define MCP_REG_MCPR_SCRATCH 0xa0000
769#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
770#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
771/* [R 32] read first 32 bit after inversion of function 0. mapped as
772 follows: [0] NIG attention for function0; [1] NIG attention for
773 function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
774 [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9]
775 GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE
776 glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0;
777 [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16]
778 MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB
779 Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw
780 interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity
781 error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw
782 interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] PBF
783 Parity error; [31] PBF Hw interrupt; */
784#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 0xa42c
785#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1 0xa430
786/* [R 32] read first 32 bit after inversion of mcp. mapped as follows: [0]
787 NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
788 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
789 [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
790 PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
791 function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
792 Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
793 mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
794 BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
795 Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
796 interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
797 Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
798 interrupt; */
799#define MISC_REG_AEU_AFTER_INVERT_1_MCP 0xa434
800/* [R 32] read second 32 bit after inversion of function 0. mapped as
801 follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
802 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
803 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
804 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
805 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
806 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
807 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
808 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
809 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
810 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
811 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
812 interrupt; */
813#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 0xa438
814#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1 0xa43c
815/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0]
816 PBClient Parity error; [1] PBClient Hw interrupt; [2] QM Parity error;
817 [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt;
818 [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9]
819 XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
820 DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
821 error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
822 PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
823 [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
824 [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
825 [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
826 [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
827#define MISC_REG_AEU_AFTER_INVERT_2_MCP 0xa440
828/* [R 32] read third 32 bit after inversion of function 0. mapped as
829 follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity
830 error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5]
831 PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
832 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
833 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
834 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
835 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
836 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
837 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
838 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
839 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
840 attn1; */
841#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 0xa444
842#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1 0xa448
843/* [R 32] read third 32 bit after inversion of mcp. mapped as follows: [0]
844 CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP
845 Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient
846 Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity
847 error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw
848 interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14]
849 MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17]
850 Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW
851 timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3
852 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1
853 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW
854 timers attn_4 func1; [30] General attn0; [31] General attn1; */
855#define MISC_REG_AEU_AFTER_INVERT_3_MCP 0xa44c
856/* [R 32] read fourth 32 bit after inversion of function 0. mapped as
857 follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
858 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
859 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
860 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
861 [14] General attn16; [15] General attn17; [16] General attn18; [17]
862 General attn19; [18] General attn20; [19] General attn21; [20] Main power
863 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
864 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
865 Latched timeout attention; [27] GRC Latched reserved access attention;
866 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
867 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
868#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 0xa450
869#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1 0xa454
870/* [R 32] read fourth 32 bit after inversion of mcp. mapped as follows: [0]
871 General attn2; [1] General attn3; [2] General attn4; [3] General attn5;
872 [4] General attn6; [5] General attn7; [6] General attn8; [7] General
873 attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11]
874 General attn13; [12] General attn14; [13] General attn15; [14] General
875 attn16; [15] General attn17; [16] General attn18; [17] General attn19;
876 [18] General attn20; [19] General attn21; [20] Main power interrupt; [21]
877 RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24]
878 RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout
879 attention; [27] GRC Latched reserved access attention; [28] MCP Latched
880 rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
881 ump_tx_parity; [31] MCP Latched scpad_parity; */
882#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458
883/* [W 14] write to this register results with the clear of the latched
884 signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in
885 d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP
886 latch; one in d5 clears GRC Latched timeout attention; one in d6 clears
887 GRC Latched reserved access attention; one in d7 clears Latched
888 rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears
889 Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both
890 ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears
891 pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; read
892 from this register return zero */
893#define MISC_REG_AEU_CLR_LATCH_SIGNAL 0xa45c
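
To make the clear-latch mapping above concrete: the register is write-one-to-clear, so releasing a single latched source is a write of the corresponding bit. A minimal sketch, reusing the hypothetical reg_wr32() helper from the earlier example:

	#include <stdint.h>

	extern void reg_wr32(uint32_t offset, uint32_t val);	/* hypothetical helper */

	/* per the description above, bit d5 clears the GRC Latched timeout attention */
	#define AEU_CLR_GRC_TIMEOUT	(1U << 5)

	static void clear_grc_timeout_latch(void)
	{
		reg_wr32(0xa45c /* MISC_REG_AEU_CLR_LATCH_SIGNAL */, AEU_CLR_GRC_TIMEOUT);
	}
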
894/* [RW 32] first 32b for enabling the output for function 0 output0. mapped
895 as follows: [0] NIG attention for function0; [1] NIG attention for
896 function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
897 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
898 GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
899 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
900 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
901 SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
902 indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
903 [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
904 SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
905 TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
906 TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
907#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 0xa06c
908#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 0xa07c
909#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 0xa08c
910#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3 0xa09c
911#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5 0xa0bc
912#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6 0xa0cc
913#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7 0xa0dc
914/* [RW 32] first 32b for enabling the output for function 1 output0. mapped
915 as follows: [0] NIG attention for function0; [1] NIG attention for
916 function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function
917 1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
918 GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
919 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
920 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
921 SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X
922 indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
923 [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
924 SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
925 TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
926 TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
927#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 0xa10c
928#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 0xa11c
929#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 0xa12c
930#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3 0xa13c
931#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5 0xa15c
932#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6 0xa16c
933#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7 0xa17c
934/* [RW 32] first 32b for enabling the output for close the gate nig. mapped
935 as follows: [0] NIG attention for function0; [1] NIG attention for
936 function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
937 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
938 GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
939 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
940 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
941 SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
942 indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
943 [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
944 SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
945 TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
946 TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
947#define MISC_REG_AEU_ENABLE1_NIG_0 0xa0ec
948#define MISC_REG_AEU_ENABLE1_NIG_1 0xa18c
949/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped
950 as follows: [0] NIG attention for function0; [1] NIG attention for
951 function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
952 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
953 GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
954 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
955 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
956 SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
957 indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
958 [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
959 SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
960 TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
961 TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
962#define MISC_REG_AEU_ENABLE1_PXP_0 0xa0fc
963#define MISC_REG_AEU_ENABLE1_PXP_1 0xa19c
964/* [RW 32] second 32b for enabling the output for function 0 output0. mapped
965 as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
966 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
967 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
968 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
969 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
970 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
971 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
972 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
973 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
974 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
975 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
976 interrupt; */
977#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0 0xa070
978#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1 0xa080
979/* [RW 32] second 32b for enabling the output for function 1 output0. mapped
980 as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
981 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
982 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
983 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
984 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
985 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
986 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
987 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
988 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
989 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
990 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
991 interrupt; */
992#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0 0xa110
993#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1 0xa120
994/* [RW 32] second 32b for enabling the output for close the gate nig. mapped
995 as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
996 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
997 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
998 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
999 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
1000 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
1001 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
1002 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
1003 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
1004 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
1005 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
1006 interrupt; */
1007#define MISC_REG_AEU_ENABLE2_NIG_0 0xa0f0
1008#define MISC_REG_AEU_ENABLE2_NIG_1 0xa190
1009/* [RW 32] second 32b for enabling the output for close the gate pxp. mapped
1010 as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
1011 Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
1012 interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
1013 error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
1014 interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
1015 NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
1016 [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
1017 interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
1018 Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
1019 Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
1020 Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
1021 interrupt; */
1022#define MISC_REG_AEU_ENABLE2_PXP_0 0xa100
1023#define MISC_REG_AEU_ENABLE2_PXP_1 0xa1a0
1024/* [RW 32] third 32b for enabling the output for function 0 output0. mapped
1025 as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
1026 Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
1027 [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
1028 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
1029 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
1030 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
1031 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
1032 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
1033 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
1034 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
1035 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
1036 attn1; */
1037#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0 0xa074
1038#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1 0xa084
1039/* [RW 32] third 32b for enabling the output for function 1 output0. mapped
1040 as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
1041 Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
1042 [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
1043 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
1044 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
1045 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
1046 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
1047 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
1048 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
1049 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
1050 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
1051 attn1; */
1052#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0 0xa114
1053#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1 0xa124
1054/* [RW 32] third 32b for enabling the output for close the gate nig. mapped
1055 as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
1056 Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
1057 [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
1058 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
1059 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
1060 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
1061 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
1062 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
1063 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
1064 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
1065 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
1066 attn1; */
1067#define MISC_REG_AEU_ENABLE3_NIG_0 0xa0f4
1068#define MISC_REG_AEU_ENABLE3_NIG_1 0xa194
1069/* [RW 32] third 32b for enabling the output for close the gate pxp. mapped
1070 as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
1071 Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
1072 [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
1073 interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
1074 error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
1075 Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
1076 pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
1077 MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
1078 SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
1079 timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
1080 func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
1081 attn1; */
1082#define MISC_REG_AEU_ENABLE3_PXP_0 0xa104
1083#define MISC_REG_AEU_ENABLE3_PXP_1 0xa1a4
1084/* [RW 32] fourth 32b for enabling the output for function 0 output0.mapped
1085 as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
1086 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
1087 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
1088 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
1089 [14] General attn16; [15] General attn17; [16] General attn18; [17]
1090 General attn19; [18] General attn20; [19] General attn21; [20] Main power
1091 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
1092 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
1093 Latched timeout attention; [27] GRC Latched reserved access attention;
1094 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
1095 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1096#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 0xa078
1097#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2 0xa098
1098#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4 0xa0b8
1099#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5 0xa0c8
1100#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6 0xa0d8
1101#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7 0xa0e8
1102/* [RW 32] fourth 32b for enabling the output for function 1 output0.mapped
1103 as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
1104 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
1105 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
1106 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
1107 [14] General attn16; [15] General attn17; [16] General attn18; [17]
1108 General attn19; [18] General attn20; [19] General attn21; [20] Main power
1109 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
1110 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
1111 Latched timeout attention; [27] GRC Latched reserved access attention;
1112 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
1113 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1114#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 0xa118
1115#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2 0xa138
1116#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4 0xa158
1117#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5 0xa168
1118#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6 0xa178
1119#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7 0xa188
1120/* [RW 32] fourth 32b for enabling the output for close the gate nig.mapped
1121 as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
1122 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
1123 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
1124 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
1125 [14] General attn16; [15] General attn17; [16] General attn18; [17]
1126 General attn19; [18] General attn20; [19] General attn21; [20] Main power
1127 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
1128 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
1129 Latched timeout attention; [27] GRC Latched reserved access attention;
1130 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
1131 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1132#define MISC_REG_AEU_ENABLE4_NIG_0 0xa0f8
1133#define MISC_REG_AEU_ENABLE4_NIG_1 0xa198
1134/* [RW 32] fourth 32b for enabling the output for close the gate pxp.mapped
1135 as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
1136 General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
1137 [7] General attn9; [8] General attn10; [9] General attn11; [10] General
1138 attn12; [11] General attn13; [12] General attn14; [13] General attn15;
1139 [14] General attn16; [15] General attn17; [16] General attn18; [17]
1140 General attn19; [18] General attn20; [19] General attn21; [20] Main power
1141 interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
1142 Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
1143 Latched timeout attention; [27] GRC Latched reserved access attention;
1144 [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
1145 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1146#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
1147#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
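Taken together, the ENABLE1..ENABLE4 registers for a given AEU output form one 128-bit enable mask over the attention input vector described in the comments above (first/second/third/fourth 32 bits). The sketch below is illustrative only: it assumes the REG_WR() accessor and struct bnx2x from bnx2x.h, the example_* function name is hypothetical, and the mask values are arbitrary placeholders rather than anything the driver actually programs.

/* Illustrative only: program the four 32-bit halves of the 128-bit
 * attention-enable mask for function 0, output 0.
 */
static void example_aeu_enable_func0_out0(struct bnx2x *bp)
{
	REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xffffffff); /* bits 0-31 */
	REG_WR(bp, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xffffffff); /* bits 32-63 */
	REG_WR(bp, MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0, 0xffffffff); /* bits 64-95 */
	REG_WR(bp, MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, 0x00000000); /* bits 96-127 */
}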
1148/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
1149 128 bit vector */
1150#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000
1151#define MISC_REG_AEU_GENERAL_ATTN_1 0xa004
1152#define MISC_REG_AEU_GENERAL_ATTN_10 0xa028
1153#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c
1154#define MISC_REG_AEU_GENERAL_ATTN_12 0xa030
1155#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008
1156#define MISC_REG_AEU_GENERAL_ATTN_3 0xa00c
1157#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010
1158#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014
1159#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018
1160#define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c
1161#define MISC_REG_AEU_GENERAL_ATTN_8 0xa020
1162#define MISC_REG_AEU_GENERAL_ATTN_9 0xa024
1163#define MISC_REG_AEU_GENERAL_MASK 0xa61c
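Per the comment above, each MISC_REG_AEU_GENERAL_ATTN_n register sets or clears a single general-attention bit in the AEU 128-bit vector. A minimal sketch, assuming REG_WR() and struct bnx2x from bnx2x.h; the choice of general attention 4 and the example_* name are arbitrary.

/* Raise general attention 4 (write 1), then clear it again (write 0). */
static void example_pulse_general_attn4(struct bnx2x *bp)
{
	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 1);
	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0);
}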
1164/* [RW 32] first 32b for inverting the input for function 0; for each bit:
1165 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for
1166 function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp;
1167 [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; [7] GPIO2 function 1;
1168 [8] GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
1169 function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
1170 Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
1171 SPIO4; [15] SPIO5; [16] MSI/X indication for mcp; [17] MSI/X indication
1172 for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; [20] PRS
1173 Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] SRC Hw
1174 interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] TCM
1175 Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] TSEMI
1176 Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
1177#define MISC_REG_AEU_INVERTER_1_FUNC_0 0xa22c
1178#define MISC_REG_AEU_INVERTER_1_FUNC_1 0xa23c
1179/* [RW 32] second 32b for inverting the input for function 0; for each bit:
1180 0= do not invert; 1= invert. mapped as follows: [0] PBClient Parity
1181 error; [1] PBClient Hw interrupt; [2] QM Parity error; [3] QM Hw
1182 interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM
1183 Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw
1184 interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
1185 DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
1186 error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
1187 PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
1188 [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
1189 [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
1190 [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
1191 [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
1192#define MISC_REG_AEU_INVERTER_2_FUNC_0 0xa230
1193#define MISC_REG_AEU_INVERTER_2_FUNC_1 0xa240
1194/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0;
1195 [9:8] = reserved. Zero = mask; one = unmask */
1196#define MISC_REG_AEU_MASK_ATTN_FUNC_0 0xa060
1197#define MISC_REG_AEU_MASK_ATTN_FUNC_1 0xa064
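The mask register above gates the eight attention output lines toward the IGU per function, with zero meaning masked and one unmasked. A read-modify-write sketch for function 0, assuming REG_RD()/REG_WR() from bnx2x.h; the line index and example_* name are illustrative.

/* Unmask attention output 'line' (0..7) toward the IGU for function 0. */
static void example_unmask_attn_line(struct bnx2x *bp, int line)
{
	u32 mask = REG_RD(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0);

	mask |= 1 << line;	/* one = unmask */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0, mask);
}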
1198/* [RW 1] If set a system kill occurred */
1199#define MISC_REG_AEU_SYS_KILL_OCCURRED 0xa610
1200/* [RW 32] Represent the status of the input vector to the AEU when a system
1201 kill occurred. The register is reset in por reset. Mapped as follows: [0]
1202 NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
1203 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
1204 [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
1205 PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
1206 function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
1207 Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
1208 mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
1209 BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
1210 Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
1211 interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
1212 Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
1213 interrupt; */
1214#define MISC_REG_AEU_SYS_KILL_STATUS_0 0xa600
1215#define MISC_REG_AEU_SYS_KILL_STATUS_1 0xa604
1216#define MISC_REG_AEU_SYS_KILL_STATUS_2 0xa608
1217#define MISC_REG_AEU_SYS_KILL_STATUS_3 0xa60c
1218/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
1219 Port. */
1220#define MISC_REG_BOND_ID 0xa400
1221/* [R 8] These bits indicate the metal revision of the chip. This value
1222 starts at 0x00 for each all-layer tape-out and increments by one for each
1223 tape-out. */
1224#define MISC_REG_CHIP_METAL 0xa404
1225/* [R 16] These bits indicate the part number for the chip. */
1226#define MISC_REG_CHIP_NUM 0xa408
1227/* [R 4] These bits indicate the base revision of the chip. This value
1228 starts at 0x0 for the A0 tape-out and increments by one for each
1229 all-layer tape-out. */
1230#define MISC_REG_CHIP_REV 0xa40c
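The four read-only identification registers above (bond id, metal revision, part number and base revision) are typically combined into a single chip identifier. The packing below is only one plausible layout and is an assumption, not something this header dictates; REG_RD() and struct bnx2x are assumed from bnx2x.h.

/* Compose a chip-id word from the MISC identification registers (sketch). */
static u32 example_read_chip_id(struct bnx2x *bp)
{
	u32 id;

	id  = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;		/* 16-bit part number */
	id |= (REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12;	/* 4-bit base rev */
	id |= (REG_RD(bp, MISC_REG_CHIP_METAL) & 0xff) << 4;	/* 8-bit metal rev */
	id |= REG_RD(bp, MISC_REG_BOND_ID) & 0xf;		/* 4-bit bond id */
	return id;
}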
1231/* [RW 32] The following driver registers(1...16) represent 16 drivers and
1232 32 clients. Each client can be controlled by one driver only. One in each
1233 bit represents that this driver controls the appropriate client (e.g. bit 5
1234 is set means this driver controls client number 5). addr1 = set; addr0 =
1235 clear; read from both addresses will give the same result = status. write
1236 to address 1 will set a request to control all the clients whose
1237 appropriate bit (in the write command) is set. if the client is free (the
1238 appropriate bit in all the other drivers is clear) one will be written to
1239 that driver register; if the client isn't free the bit will remain zero.
1240 if the appropriate bit is already set (the driver requests to gain control
1241 of a client it already controls); the ~MISC_REGISTERS_INT_STS.GENERIC_SW
1242 interrupt will be asserted. write to address 0 will set a request to
1243 free all the clients whose appropriate bit (in the write command) is
1244 set. if the appropriate bit is clear (the driver requests to free a client
1245 it doesn't control); the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
1246 be asserted. */
1247#define MISC_REG_DRIVER_CONTROL_1 0xa510
1248#define MISC_REG_DRIVER_CONTROL_7 0xa3c8
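The comment above describes a set/clear protocol: writing a client mask to the register address plus 4 (addr1) requests control of those clients, writing the mask to the base address (addr0) releases them, and a read from either address returns the current ownership status. A minimal acquire/release sketch under those rules; REG_RD()/REG_WR() are assumed from bnx2x.h, and the example_* names and client parameter are illustrative.

/* Request ownership of 'client' (0..31) and report whether it was granted. */
static bool example_acquire_client(struct bnx2x *bp, int client)
{
	u32 bit = 1 << client;

	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + 4, bit);		/* addr1 = set */
	return (REG_RD(bp, MISC_REG_DRIVER_CONTROL_1) & bit) != 0;
}

/* Release a previously acquired client. */
static void example_release_client(struct bnx2x *bp, int client)
{
	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1, 1 << client);	/* addr0 = clear */
}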
1249/* [RW 1] e1hmf for WOL. If clear; the WOL signal to the PXP will be sent on
1250 bit 0 only. */
1251#define MISC_REG_E1HMF_MODE 0xa5f8
1252/* [RW 32] Debug only: spare RW register reset by core reset */
1253#define MISC_REG_GENERIC_CR_0 0xa460
1254/* [RW 32] Debug only: spare RW register reset by por reset */
1255#define MISC_REG_GENERIC_POR_1 0xa474
1256/* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of
1257 these bits is written as a '1'; the corresponding GPIO bit will turn off
1258 its drivers and become an input. This is the reset state of all GPIO
1259 pins. The read value of these bits will be a '1' if that last command
1260 (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff).
1261 [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written
1262 as a '1'; the corresponding GPIO bit will drive low. The read value of
1263 these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for
1264 this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET port
1265 0; When any of these bits is written as a '1'; the corresponding GPIO
1266 bit will drive high (if it has that capability). The read value of these
1267 bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
1268 bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE port 0;
1269 RO; These bits indicate the read value of each of the eight GPIO pins.
1270 This is the result value of the pin; not the drive value. Writing these
1271 bits will have no effect. */
1272#define MISC_REG_GPIO 0xa490
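The GPIO register packs, per port, 4-bit FLOAT, CLR, SET and VALUE fields as laid out in the comment above. The sketch below simply follows that layout to float a pin and then read its input value; it assumes REG_RD()/REG_WR() and struct bnx2x from bnx2x.h, and the example_* name and parameters are illustrative.

/* Float GPIO pin 'pin' (0..3) of 'port' (0 or 1) and read its input value. */
static int example_gpio_float_and_read(struct bnx2x *bp, int port, int pin)
{
	int shift = pin + port * 4;

	/* FLOAT fields: bits [27-24] for port 0, [31-28] for port 1 */
	REG_WR(bp, MISC_REG_GPIO, 1 << (24 + shift));

	/* VALUE fields: bits [3-0] for port 0, [7-4] for port 1 */
	return (REG_RD(bp, MISC_REG_GPIO) >> shift) & 1;
}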
1273/* [RW 8] These bits enable the GPIO_INTs to signal events to the
1274 IGU/MCP according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2]
1275 p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2;
1276 [7] p1_gpio_3; */
1277#define MISC_REG_GPIO_EVENT_EN 0xa2bc
1278/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing a
1279 '1' to these bits clears the corresponding bit in the #OLD_VALUE register.
1280 This will acknowledge an interrupt on the falling edge of the corresponding
1281 GPIO input (reset value 0). [23-20] OLD_SET port1; [19-16] OLD_SET port0;
1282 Writing a '1' to these bits sets the corresponding bit in the #OLD_VALUE
1283 register. This will acknowledge an interrupt on the rising edge of the
1284 corresponding GPIO input (reset value 0). [15-12] OLD_VALUE port1; [11-8]
1285 OLD_VALUE port0; RO; These bits indicate the old value of the GPIO input
1286 value. When the ~INT_STATE bit is set; this bit indicates the OLD value
1287 of the pin such that if ~INT_STATE is set and this bit is '0'; then the
1288 interrupt is due to a low to high edge. If ~INT_STATE is set and this bit
1289 is '1'; then the interrupt is due to a high to low edge (reset value 0).
1290 [7-4] INT_STATE port1; [3-0] INT_STATE RO port0; These bits indicate the
1291 current GPIO interrupt state for each GPIO pin. This bit is cleared when
1292 the appropriate #OLD_SET or #OLD_CLR command bit is written. This bit is
1293 set when the GPIO input does not match the current value in #OLD_VALUE
1294 (reset value 0). */
1295#define MISC_REG_GPIO_INT 0xa494
1296/* [R 28] this field holds the last information that caused reserved
1297 attention. bits [19:0] - address; [22:20] function; [23] reserved;
1298 [27:24] the master that caused the attention - according to the following
1299 encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
1300 dbu; 8 = dmae */
1301#define MISC_REG_GRC_RSV_ATTN 0xa3c0
1302/* [R 28] this field holds the last information that caused timeout
1303 attention. bits [19:0] - address; [22:20] function; [23] reserved;
1304 [27:24] the master that caused the attention - according to the following
1305 encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
1306 dbu; 8 = dmae */
1307#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
1308/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any
1309 access that does not finish within
1310 ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is
1311 cleared; this timeout is disabled. If this timeout occurs; the GRC shall
1312 assert its attention output. */
1313#define MISC_REG_GRC_TIMEOUT_EN 0xa280
1314/* [RW 28] 28 LSB of LCPLL first register; reset val = 521. inside order of
1315 the bits is: [2:0] OAC reset value 001) CML output buffer bias control;
1316 111 for +40%; 011 for +20%; 001 for 0%; 000 for -20%. [5:3] Icp_ctrl
1317 (reset value 001) Charge pump current control; 111 for 720u; 011 for
1318 600u; 001 for 480u and 000 for 360u. [7:6] Bias_ctrl (reset value 00)
1319 Global bias control; When bit 7 is high bias current will be 10 0gh; When
1320 bit 6 is high bias will be 100w; Valid values are 00; 10; 01. [10:8]
1321 Pll_observe (reset value 010) Bits to control observability. bit 10 is
1322 for test bias; bit 9 is for test CK; bit 8 is test Vc. [12:11] Vth_ctrl
1323 (reset value 00) Comparator threshold control. 00 for 0.6V; 01 for 0.54V
1324 and 10 for 0.66V. [13] pllSeqStart (reset value 0) Enables VCO tuning
1325 sequencer: 1= sequencer disabled; 0= sequencer enabled (inverted
1326 internally). [14] reserved (reset value 0) Reset for VCO sequencer is
1327 connected to RESET input directly. [15] capRetry_en (reset value 0)
1328 enable retry on cap search failure (inverted). [16] freqMonitor_e (reset
1329 value 0) bit to continuously monitor vco freq (inverted). [17]
1330 freqDetRestart_en (reset value 0) bit to enable restart when not freq
1331 locked (inverted). [18] freqDetRetry_en (reset value 0) bit to enable
1332 retry on freq det failure(inverted). [19] pllForceFdone_en (reset value
1333 0) bit to enable pllForceFdone & pllForceFpass into pllSeq. [20]
1334 pllForceFdone (reset value 0) bit to force freqDone. [21] pllForceFpass
1335 (reset value 0) bit to force freqPass. [22] pllForceDone_en (reset value
1336 0) bit to enable pllForceCapDone. [23] pllForceCapDone (reset value 0)
1337 bit to force capDone. [24] pllForceCapPass_en (reset value 0) bit to
1338 enable pllForceCapPass. [25] pllForceCapPass (reset value 0) bit to force
1339 capPass. [26] capRestart (reset value 0) bit to force cap sequencer to
1340 restart. [27] capSelectM_en (reset value 0) bit to enable cap select
1341 register bits. */
1342#define MISC_REG_LCPLL_CTRL_1 0xa2a4
1343#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8
1344/* [RW 4] Interrupt mask register #0 read/write */
1345#define MISC_REG_MISC_INT_MASK 0xa388
1346/* [RW 1] Parity mask register #0 read/write */
1347#define MISC_REG_MISC_PRTY_MASK 0xa398
1348/* [R 1] Parity register #0 read */
1349#define MISC_REG_MISC_PRTY_STS 0xa38c
1350#define MISC_REG_NIG_WOL_P0 0xa270
1351#define MISC_REG_NIG_WOL_P1 0xa274
1352/* [R 1] If set indicate that the pcie_rst_b was asserted without perst
1353 assertion */
1354#define MISC_REG_PCIE_HOT_RESET 0xa618
1355/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x071d2911.
1356 inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1
1357 divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1
1358 divider[3] (reset value 0); [4] P2 divider[0] (reset value 1); [5] P2
1359 divider[1] (reset value 0); [6] P2 divider[2] (reset value 0); [7] P2
1360 divider[3] (reset value 0); [8] ph_det_dis (reset value 1); [9]
1361 freq_det_dis (reset value 0); [10] Icpx[0] (reset value 0); [11] Icpx[1]
1362 (reset value 1); [12] Icpx[2] (reset value 0); [13] Icpx[3] (reset value
1363 1); [14] Icpx[4] (reset value 0); [15] Icpx[5] (reset value 0); [16]
1364 Rx[0] (reset value 1); [17] Rx[1] (reset value 0); [18] vc_en (reset
1365 value 1); [19] vco_rng[0] (reset value 1); [20] vco_rng[1] (reset value
1366 1); [21] Kvco_xf[0] (reset value 0); [22] Kvco_xf[1] (reset value 0);
1367 [23] Kvco_xf[2] (reset value 0); [24] Kvco_xs[0] (reset value 1); [25]
1368 Kvco_xs[1] (reset value 1); [26] Kvco_xs[2] (reset value 1); [27]
1369 testd_en (reset value 0); [28] testd_sel[0] (reset value 0); [29]
1370 testd_sel[1] (reset value 0); [30] testd_sel[2] (reset value 0); [31]
1371 testa_en (reset value 0); */
1372#define MISC_REG_PLL_STORM_CTRL_1 0xa294
1373#define MISC_REG_PLL_STORM_CTRL_2 0xa298
1374#define MISC_REG_PLL_STORM_CTRL_3 0xa29c
1375#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0
1376/* [RW 32] reset reg#2; write/read one = the specific block is out of reset;
1377 write/read zero = the specific block is in reset; addr 0-wr- the write
1378 value will be written to the register; addr 1-set - one will be written
1379 to all the bits that have the value of one in the data written (bits that
1380 have the value of zero will not be changed); addr 2-clear - zero will be
1381 written to all the bits that have the value of one in the data written
1382 (bits that have the value of zero will not be changed); addr 3-ignore;
1383 read ignore from all addr except addr 00; inside order of the bits is:
1384 [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3] rst_emac1; [4] rst_grc;
1385 [5] rst_mcp_n_reset_reg_hard_core; [6] rst_mcp_n_hard_core_rst_b; [7]
1386 rst_mcp_n_reset_cmn_cpu; [8] rst_mcp_n_reset_cmn_core; [9] rst_rbcn;
1387 [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13]
1388 Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; [16]
1389 rst_pxp_rq_rd_wr; [31:17] reserved */
1390#define MISC_REG_RESET_REG_2 0xa590
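As the comment above explains, this reset register is reachable at three offsets: the base address for a direct write, +4 to set only the bits written as one, and +8 to clear only those bits, where one means out of reset and zero means in reset. A sketch of pulsing a single block through reset using the set/clear addresses; REG_WR() is assumed from bnx2x.h, 'bit' stands for whichever rst_* bit is of interest, and the required settling time is left as a comment.

/* Put the block selected by 'bit' into reset, then take it out again. */
static void example_pulse_block_reset(struct bnx2x *bp, u32 bit)
{
	REG_WR(bp, MISC_REG_RESET_REG_2 + 8, bit);	/* clear = into reset */
	/* ...wait as long as the block in question requires... */
	REG_WR(bp, MISC_REG_RESET_REG_2 + 4, bit);	/* set = out of reset */
}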
1391/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
1392 shared with the driver resides */
1393#define MISC_REG_SHARED_MEM_ADDR 0xa2b4
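This register publishes the GRC address of the MCP scratch-pad shared with the driver, so reading it yields the base from which shared-memory fields can be addressed. A minimal sketch, assuming REG_RD() and struct bnx2x from bnx2x.h; the offset parameter and example_* name are illustrative.

/* Read a 32-bit field at 'offset' within the MCP shared memory. */
static u32 example_shmem_rd(struct bnx2x *bp, u32 offset)
{
	u32 shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);

	return REG_RD(bp, shmem_base + offset);
}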
1394/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1';
1395 the corresponding SPIO bit will turn off its drivers and become an
1396 input. This is the reset state of all SPIO pins. The read value of these
1397 bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
1398 bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits
1399 is written as a '1'; the corresponding SPIO bit will drive low. The read
1400 value of these bits will be a '1' if that last command (#SET; #CLR; or
1401#FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of
1402 these bits is written as a '1'; the corresponding SPIO bit will drive
1403 high (if it has that capability). The read value of these bits will be a
1404 '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET.
1405 (reset value 0). [7-0] VALUE RO; These bits indicate the read value of
1406 each of the eight SPIO pins. This is the result value of the pin; not the
1407 drive value. Writing these bits will have no effect. Each 8-bit field
1408 is divided as follows: [0] VAUX Enable; when pulsed low; enables supply
1409 from VAUX. (This is an output pin only; the FLOAT field is not applicable
1410 for this pin); [1] VAUX Disable; when pulsed low; disables supply form
1411 VAUX. (This is an output pin only; FLOAT field is not applicable for this
1412 pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to
1413 select VAUX supply. (This is an output pin only; it is not controlled by
1414 the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT
1415 field is not applicable for this pin; only the VALUE fields is relevant -
1416 it reflects the output value); [3] port swap [4] spio_4; [5] spio_5; [6]
1417 Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP
1418 device ID select; read by UMP firmware. */
1419#define MISC_REG_SPIO 0xa4fc
1420/* [RW 8] These bits enable the SPIO_INTs to signals event to the IGU/MC.
1421 according to the following map: [3:0] reserved; [4] spio_4 [5] spio_5;
1422 [7:0] reserved */
1423#define MISC_REG_SPIO_EVENT_EN 0xa2b8
1424/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bit clears the
1425 corresponding bit in the #OLD_VALUE register. This will acknowledge an
1426 interrupt on the falling edge of corresponding SPIO input (reset value
1427 0). [23-16] OLD_SET Writing a '1' to these bit sets the corresponding bit
1428 in the #OLD_VALUE register. This will acknowledge an interrupt on the
1429 rising edge of corresponding SPIO input (reset value 0). [15-8] OLD_VALUE
1430 RO; These bits indicate the old value of the SPIO input value. When the
1431 ~INT_STATE bit is set; this bit indicates the OLD value of the pin such
1432 that if ~INT_STATE is set and this bit is '0'; then the interrupt is due
1433 to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the
1434 interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE
1435 RO; These bits indicate the current SPIO interrupt state for each SPIO
1436 pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR
1437 command bit is written. This bit is set when the SPIO input does not
1438 match the current value in #OLD_VALUE (reset value 0). */
1439#define MISC_REG_SPIO_INT 0xa500
1440/* [RW 32] reload value for counter 4; the value will be reloaded if
1441 the counter reached zero and the reload bit
1442 (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
1443#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
1444/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
1445 in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
1446 timer 8 */
1447#define MISC_REG_SW_TIMER_VAL 0xa5c0
1448/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
1449 loaded; 0-prepare; 1-unprepare */
1450#define MISC_REG_UNPREPARED 0xa424
1451#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0)
1452#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1)
1453#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
1454#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
1455#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
1456#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
1457#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
1458#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
1459#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS (0xf<<18)
1460/* [RW 1] Input enable for RX_BMAC0 IF */
1461#define NIG_REG_BMAC0_IN_EN 0x100ac
1462/* [RW 1] output enable for TX_BMAC0 IF */
1463#define NIG_REG_BMAC0_OUT_EN 0x100e0
1464/* [RW 1] output enable for TX BMAC pause port 0 IF */
1465#define NIG_REG_BMAC0_PAUSE_OUT_EN 0x10110
1466/* [RW 1] output enable for RX_BMAC0_REGS IF */
1467#define NIG_REG_BMAC0_REGS_OUT_EN 0x100e8
1468/* [RW 1] output enable for RX BRB1 port0 IF */
1469#define NIG_REG_BRB0_OUT_EN 0x100f8
1470/* [RW 1] Input enable for TX BRB1 pause port 0 IF */
1471#define NIG_REG_BRB0_PAUSE_IN_EN 0x100c4
1472/* [RW 1] output enable for RX BRB1 port1 IF */
1473#define NIG_REG_BRB1_OUT_EN 0x100fc
1474/* [RW 1] Input enable for TX BRB1 pause port 1 IF */
1475#define NIG_REG_BRB1_PAUSE_IN_EN 0x100c8
1476/* [RW 1] output enable for RX BRB1 LP IF */
1477#define NIG_REG_BRB_LB_OUT_EN 0x10100
1478/* [WB_W 82] Debug packet to LP from RBC; Data spelling: [63:0] data; [64]
1479 error; [67:65] eop_bvalid; [68] eop; [69] sop; [70] port_id; [71] flush;
1480 [73:72] vnic_num; [81:74] sideband_info */
1481#define NIG_REG_DEBUG_PACKET_LB 0x10800
1482/* [RW 1] Input enable for TX Debug packet */
1483#define NIG_REG_EGRESS_DEBUG_IN_EN 0x100dc
1484/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all
1485 packets from PBF are not forwarded to the MAC and are just deleted from the
1486 FIFO. The first packet may be deleted from the middle; and the last packet
1487 will always be deleted till the end. */
1488#define NIG_REG_EGRESS_DRAIN0_MODE 0x10060
1489/* [RW 1] Output enable to EMAC0 */
1490#define NIG_REG_EGRESS_EMAC0_OUT_EN 0x10120
1491/* [RW 1] MAC configuration for packets of port0. If 1 - all packets output
1492 to emac for port0; otherwise to bmac for port0 */
1493#define NIG_REG_EGRESS_EMAC0_PORT 0x10058
1494/* [RW 1] Input enable for TX PBF user packet port0 IF */
1495#define NIG_REG_EGRESS_PBF0_IN_EN 0x100cc
1496/* [RW 1] Input enable for TX PBF user packet port1 IF */
1497#define NIG_REG_EGRESS_PBF1_IN_EN 0x100d0
1498/* [RW 1] Input enable for TX UMP management packet port0 IF */
1499#define NIG_REG_EGRESS_UMP0_IN_EN 0x100d4
1500/* [RW 1] Input enable for RX_EMAC0 IF */
1501#define NIG_REG_EMAC0_IN_EN 0x100a4
1502/* [RW 1] output enable for TX EMAC pause port 0 IF */
1503#define NIG_REG_EMAC0_PAUSE_OUT_EN 0x10118
1504/* [R 1] status from emac0. This bit is set when MDINT from either the
1505 EXT_MDINT pin or from the Copper PHY is driven low. This condition must
1506 be cleared in the attached PHY device that is driving the MDINT pin. */
1507#define NIG_REG_EMAC0_STATUS_MISC_MI_INT 0x10494
1508/* [WB 48] This address space contains BMAC0 registers. The BMAC registers
1509 are described in appendix A. In order to access the BMAC0 registers; the
1510 base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be
1511 added to each BMAC register offset */
1512#define NIG_REG_INGRESS_BMAC0_MEM 0x10c00
1513/* [WB 48] This address space contains BMAC1 registers. The BMAC registers
1514 are described in appendix A. In order to access the BMAC1 registers; the
1515 base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be
1516 added to each BMAC register offset */
1517#define NIG_REG_INGRESS_BMAC1_MEM 0x11000
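Per the two comments above, an individual BMAC register is reached by adding its block-relative offset to the BMAC0/BMAC1 base for the port in question. The sketch below only illustrates that address arithmetic: the registers are tagged wide-bus ([WB]), so splitting the write into two dword accesses here is an assumption made for brevity (a wide-bus/DMAE write may be required in practice), and 'bmac_reg_offset' is a stand-in for a real BMAC register offset; REG_WR() is assumed from bnx2x.h.

/* Write a 64-bit BMAC register of 'port' as two 32-bit halves (sketch only). */
static void example_bmac_wr(struct bnx2x *bp, int port,
			    u32 bmac_reg_offset, u32 lo, u32 hi)
{
	u32 base = port ? NIG_REG_INGRESS_BMAC1_MEM :
			  NIG_REG_INGRESS_BMAC0_MEM;

	REG_WR(bp, base + bmac_reg_offset, lo);
	REG_WR(bp, base + bmac_reg_offset + 4, hi);
}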
1518/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */
1519#define NIG_REG_INGRESS_EOP_LB_EMPTY 0x104e0
1520/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data
1521 packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] */
1522#define NIG_REG_INGRESS_EOP_LB_FIFO 0x104e4
1523/* [RW 27] 0 - must be active for Everest A0; 1- for Everest B0 when latch
1524 logic for interrupts must be used. Enable per bit of interrupt of
1525 ~latch_status.latch_status */
1526#define NIG_REG_LATCH_BC_0 0x16210
1527/* [RW 27] Latch for each interrupt from Unicore.b[0]
1528 status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete;
1529 b[2]status_emac0_misc_cfg_change; b[3]status_emac0_misc_link_status;
1530 b[4]status_emac0_misc_link_change; b[5]status_emac0_misc_attn;
1531 b[6]status_serdes0_mac_crs; b[7]status_serdes0_autoneg_complete;
1532 b[8]status_serdes0_fiber_rxact; b[9]status_serdes0_link_status;
1533 b[10]status_serdes0_mr_page_rx; b[11]status_serdes0_cl73_an_complete;
1534 b[12]status_serdes0_cl73_mr_page_rx; b[13]status_serdes0_rx_sigdet;
1535 b[14]status_xgxs0_remotemdioreq; b[15]status_xgxs0_link10g;
1536 b[16]status_xgxs0_autoneg_complete; b[17]status_xgxs0_fiber_rxact;
1537 b[21:18]status_xgxs0_link_status; b[22]status_xgxs0_mr_page_rx;
1538 b[23]status_xgxs0_cl73_an_complete; b[24]status_xgxs0_cl73_mr_page_rx;
1539 b[25]status_xgxs0_rx_sigdet; b[26]status_xgxs0_mac_crs */
1540#define NIG_REG_LATCH_STATUS_0 0x18000
1541/* [RW 1] led 10g for port 0 */
1542#define NIG_REG_LED_10G_P0 0x10320
1543/* [RW 1] led 10g for port 1 */
1544#define NIG_REG_LED_10G_P1 0x10324
1545/* [RW 1] Port0: This bit is set to enable the use of the
1546 ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field
1547 defined below. If this bit is cleared; then the blink rate will be about
1548 8Hz. */
1549#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 0x10318
1550/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for
1551 Traffic LED in milliseconds. Must be a non-zero value. This 12-bit field
1552 is reset to 0x080 (128 ms); giving a default blink rate of approximately 8Hz. */
1553#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310
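Since this register holds the full on+off cycle period in milliseconds, the reset value 0x080 (128 ms) corresponds to the roughly 8 Hz default noted above. A small sketch converting a target blink frequency into a register value, assuming REG_WR() from bnx2x.h; the non-zero floor and the clamp to the 12-bit field follow the comment, everything else is illustrative.

/* Program the port 0 traffic-LED blink rate from a frequency in Hz. */
static void example_set_blink_rate(struct bnx2x *bp, u32 hz)
{
	u32 period_ms;

	if (!hz)
		hz = 1;
	period_ms = 1000 / hz;		/* full on+off cycle in ms */
	if (!period_ms)
		period_ms = 1;		/* register must be non-zero */
	if (period_ms > 0xfff)
		period_ms = 0xfff;	/* 12-bit field */

	REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0, period_ms);
	REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0, 1);
}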
1554/* [RW 1] Port0: If set along with the
1555 ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0
1556 bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED
1557 bit; the Traffic LED will blink with the blink rate specified in
1558 ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
1559 ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
1560 fields. */
1561#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 0x10308
1562/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The
1563 Traffic LED will then be controlled via bit ~nig_registers_
1564 led_control_traffic_p0.led_control_traffic_p0 and bit
1565 ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 */
1566#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 0x102f8
1567/* [RW 1] Port0: If set along with the led_control_override_traffic_p0 bit;
1568 turns on the Traffic LED. If the led_control_blink_traffic_p0 bit is also
1569 set; the LED will blink with blink rate specified in
1570 ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
1571 ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
1572 fields. */
1573#define NIG_REG_LED_CONTROL_TRAFFIC_P0 0x10300
1574/* [RW 4] led mode for port0: 0 MAC; 1-3 PHY1; 4 MAC2; 5-7 PHY4; 8-MAC3;
1575 9-11PHY7; 12 MAC4; 13-15 PHY10; */
1576#define NIG_REG_LED_MODE_P0 0x102f0
1577/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1-
1578 tsdm enable; b2- usdm enable */
1579#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070
1580#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 0x16074
1581/* [RW 1] SAFC enable for port0. This register may get 1 only when
1582 ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same
1583 port */
1584#define NIG_REG_LLFC_ENABLE_0 0x16208
1585/* [RW 16] classes are high-priority for port0 */
1586#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058
1587/* [RW 16] classes are low-priority for port0 */
1588#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060
1589/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
1590#define NIG_REG_LLFC_OUT_EN_0 0x160c8
1591#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
1592#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
1593#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
1594#define NIG_REG_LLH0_BRB1_DRV_MASK_MF 0x16048
1595/* [RW 1] send to BRB1 if no match on any of RMP rules. */
1596#define NIG_REG_LLH0_BRB1_NOT_MCP 0x1025c
1597/* [RW 2] Determine the classification participants. 0: no classification.1:
1598 classification upon VLAN id. 2: classification upon MAC address. 3:
1599 classification upon both VLAN id & MAC addr. */
1600#define NIG_REG_LLH0_CLS_TYPE 0x16080
1601/* [RW 32] cm header for llh0 */
1602#define NIG_REG_LLH0_CM_HEADER 0x1007c
1603#define NIG_REG_LLH0_DEST_IP_0_1 0x101dc
1604#define NIG_REG_LLH0_DEST_MAC_0_0 0x101c0
1605/* [RW 16] destination TCP address 1. The LLH will look for this address in
1606 all incoming packets. */
1607#define NIG_REG_LLH0_DEST_TCP_0 0x10220
1608/* [RW 16] destination UDP address 1. The LLH will look for this address in
1609 all incoming packets. */
1610#define NIG_REG_LLH0_DEST_UDP_0 0x10214
1611#define NIG_REG_LLH0_ERROR_MASK 0x1008c
1612/* [RW 8] event id for llh0 */
1613#define NIG_REG_LLH0_EVENT_ID 0x10084
1614#define NIG_REG_LLH0_FUNC_EN 0x160fc
1615#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
1616/* [RW 1] Determine the IP version to look for in
1617 ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
1618#define NIG_REG_LLH0_IPV4_IPV6_0 0x10208
1619/* [RW 1] t bit for llh0 */
1620#define NIG_REG_LLH0_T_BIT 0x10074
1621/* [RW 12] VLAN ID 1. In case of VLAN packet the LLH will look for this ID. */
1622#define NIG_REG_LLH0_VLAN_ID_0 0x1022c
1623/* [RW 8] init credit counter for port0 in LLH */
1624#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554
1625#define NIG_REG_LLH0_XCM_MASK 0x10130
1626#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248
1627/* [RW 1] send to BRB1 if no match on any of RMP rules. */
1628#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc
1629/* [RW 2] Determine the classification participants. 0: no classification.1:
1630 classification upon VLAN id. 2: classification upon MAC address. 3:
1631 classification upon both VLAN id & MAC addr. */
1632#define NIG_REG_LLH1_CLS_TYPE 0x16084
1633/* [RW 32] cm header for llh1 */
1634#define NIG_REG_LLH1_CM_HEADER 0x10080
1635#define NIG_REG_LLH1_ERROR_MASK 0x10090
1636/* [RW 8] event id for llh1 */
1637#define NIG_REG_LLH1_EVENT_ID 0x10088
1638/* [RW 8] init credit counter for port1 in LLH */
1639#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
1640#define NIG_REG_LLH1_XCM_MASK 0x10134
1641/* [RW 1] When this bit is set; the LLH will expect all packets to be with
1642 e1hov */
1643#define NIG_REG_LLH_E1HOV_MODE 0x160d8
1644/* [RW 1] When this bit is set; the LLH will classify the packet before
1645 sending it to the BRB or calculating WoL on it. */
1646#define NIG_REG_LLH_MF_MODE 0x16024
1647#define NIG_REG_MASK_INTERRUPT_PORT0 0x10330
1648#define NIG_REG_MASK_INTERRUPT_PORT1 0x10334
1649/* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */
1650#define NIG_REG_NIG_EMAC0_EN 0x1003c
1651/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1 block. */
1652#define NIG_REG_NIG_EMAC1_EN 0x10040
1653/* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the
1654 EMAC0 to strip the CRC from the ingress packets. */
1655#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044
1656/* [R 32] Interrupt register #0 read */
1657#define NIG_REG_NIG_INT_STS_0 0x103b0
1658#define NIG_REG_NIG_INT_STS_1 0x103c0
1659/* [R 32] Parity register #0 read */
1660#define NIG_REG_NIG_PRTY_STS 0x103d0
1661/* [RW 1] Pause enable for port0. This register may get 1 only when
1662 ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
1663 port */
1664#define NIG_REG_PAUSE_ENABLE_0 0x160c0
1665/* [RW 1] Input enable for RX PBF LP IF */
1666#define NIG_REG_PBF_LB_IN_EN 0x100b4
1667/* [RW 1] Value of this register will be transmitted to port swap when
1668 ~nig_registers_strap_override.strap_override =1 */
1669#define NIG_REG_PORT_SWAP 0x10394
1670/* [RW 1] output enable for RX parser descriptor IF */
1671#define NIG_REG_PRS_EOP_OUT_EN 0x10104
1672/* [RW 1] Input enable for RX parser request IF */
1673#define NIG_REG_PRS_REQ_IN_EN 0x100b8
1674/* [RW 5] control to serdes - CL45 DEVAD */
1675#define NIG_REG_SERDES0_CTRL_MD_DEVAD 0x10370
1676/* [RW 1] control to serdes; 0 - clause 45; 1 - clause 22 */
1677#define NIG_REG_SERDES0_CTRL_MD_ST 0x1036c
1678/* [RW 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */
1679#define NIG_REG_SERDES0_CTRL_PHY_ADDR 0x10374
1680/* [R 1] status from serdes0 that inputs to interrupt logic of link status */
1681#define NIG_REG_SERDES0_STATUS_LINK_STATUS 0x10578
1682/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
1683 for port0 */
1684#define NIG_REG_STAT0_BRB_DISCARD 0x105f0
1685/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
1686 for port0 */
1687#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8
1688/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
1689 are between 1024 and 1522 bytes for port0 */
1690#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750
1691/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
1692 are 1523 bytes and above for port0 */
1693#define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760
1694/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
1695 for port1 */
1696#define NIG_REG_STAT1_BRB_DISCARD 0x10628
1697/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that
1698 are between 1024 and 1522 bytes for port1 */
1699#define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0
1700/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that
1701 are 1523 bytes and above for port1 */
1702#define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0
1703/* [WB_R 64] Rx statistics : User octets received for LP */
1704#define NIG_REG_STAT2_BRB_OCTET 0x107e0
1705#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328
1706#define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c
1707/* [RW 1] port swap mux selection. If this register is equal to 0 then port
1708 swap is equal to the SPIO pin that inputs from ifmux_serdes_swap. If 1 then
1709 port swap is equal to ~nig_registers_port_swap.port_swap */
1710#define NIG_REG_STRAP_OVERRIDE 0x10398
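These two registers act as a pair: NIG_REG_PORT_SWAP only takes effect when NIG_REG_STRAP_OVERRIDE selects the register value instead of the SPIO strap pin. A two-write sketch, assuming REG_WR() and struct bnx2x from bnx2x.h; forcing the swap from software is just an example.

/* Drive port swap from the register rather than the strap pin. */
static void example_force_port_swap(struct bnx2x *bp, bool swap)
{
	REG_WR(bp, NIG_REG_PORT_SWAP, swap ? 1 : 0);
	REG_WR(bp, NIG_REG_STRAP_OVERRIDE, 1);	/* 1 = use NIG_REG_PORT_SWAP */
}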
1711/* [RW 1] output enable for RX_XCM0 IF */
1712#define NIG_REG_XCM0_OUT_EN 0x100f0
1713/* [RW 1] output enable for RX_XCM1 IF */
1714#define NIG_REG_XCM1_OUT_EN 0x100f4
1715/* [RW 1] control to xgxs - remote PHY in-band MDIO */
1716#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST 0x10348
1717/* [RW 5] control to xgxs - CL45 DEVAD */
1718#define NIG_REG_XGXS0_CTRL_MD_DEVAD 0x1033c
1719/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */
1720#define NIG_REG_XGXS0_CTRL_MD_ST 0x10338
1721/* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */
1722#define NIG_REG_XGXS0_CTRL_PHY_ADDR 0x10340
1723/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. */
1724#define NIG_REG_XGXS0_STATUS_LINK10G 0x10680
1725/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */
1726#define NIG_REG_XGXS0_STATUS_LINK_STATUS 0x10684
1727/* [RW 2] selection for XGXS lane of port 0 in NIG_MUX block */
1728#define NIG_REG_XGXS_LANE_SEL_P0 0x102e8
1729/* [RW 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */
1730#define NIG_REG_XGXS_SERDES0_MODE_SEL 0x102e0
1731#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT (0x1<<0)
1732#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1<<9)
1733#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15)
1734#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18)
1735#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
1736/* [RW 1] Disable processing further tasks from port 0 (after ending the
1737 current task in process). */
1738#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
1739/* [RW 1] Disable processing further tasks from port 1 (after ending the
1740 current task in process). */
1741#define PBF_REG_DISABLE_NEW_TASK_PROC_P1 0x140060
1742/* [RW 1] Disable processing further tasks from port 4 (after ending the
1743 current task in process). */
1744#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
1745#define PBF_REG_IF_ENABLE_REG 0x140044
1746/* [RW 1] Init bit. When set the initial credits are copied to the credit
1747 registers (except the port credits). Should be set and then reset after
1748 the configuration of the block has ended. */
1749#define PBF_REG_INIT 0x140000
1750/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
1751 copied to the credit register. Should be set and then reset after the
1752 configuration of the port has ended. */
1753#define PBF_REG_INIT_P0 0x140004
1754/* [RW 1] Init bit for port 1. When set the initial credit of port 1 is
1755 copied to the credit register. Should be set and then reset after the
1756 configuration of the port has ended. */
1757#define PBF_REG_INIT_P1 0x140008
1758/* [RW 1] Init bit for port 4. When set the initial credit of port 4 is
1759 copied to the credit register. Should be set and then reset after the
1760 configuration of the port has ended. */
1761#define PBF_REG_INIT_P4 0x14000c
1762/* [RW 1] Enable for mac interface 0. */
1763#define PBF_REG_MAC_IF0_ENABLE 0x140030
1764/* [RW 1] Enable for mac interface 1. */
1765#define PBF_REG_MAC_IF1_ENABLE 0x140034
1766/* [RW 1] Enable for the loopback interface. */
1767#define PBF_REG_MAC_LB_ENABLE 0x140040
1768/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
1769 not supported. */
1770#define PBF_REG_P0_ARB_THRSH 0x1400e4
1771/* [R 11] Current credit for port 0 in the tx port buffers in 16 byte lines. */
1772#define PBF_REG_P0_CREDIT 0x140200
1773/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
1774 lines. */
1775#define PBF_REG_P0_INIT_CRD 0x1400d0
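The PBF init bits above work as a latch pulse: the initial credit values are copied when the bit is set, and the bit is cleared again once configuration is done. A sketch for port 0 that programs the initial credit and then pulses the per-port init bit; REG_WR() and struct bnx2x are assumed from bnx2x.h, and the credit value is caller-supplied, in 16-byte lines as the comments specify.

/* Load the port 0 initial credit and pulse the per-port init bit. */
static void example_pbf_init_port0(struct bnx2x *bp, u32 init_crd)
{
	REG_WR(bp, PBF_REG_P0_INIT_CRD, init_crd);	/* 16-byte lines */
	REG_WR(bp, PBF_REG_INIT_P0, 1);			/* copy initial credit */
	REG_WR(bp, PBF_REG_INIT_P0, 0);			/* end of configuration */
}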
1776/* [RW 1] Indication that pause is enabled for port 0. */
1777#define PBF_REG_P0_PAUSE_ENABLE 0x140014
1778/* [R 8] Number of tasks in port 0 task queue. */
1779#define PBF_REG_P0_TASK_CNT 0x140204
1780/* [R 11] Current credit for port 1 in the tx port buffers in 16 byte lines. */
1781#define PBF_REG_P1_CREDIT 0x140208
1782/* [RW 11] Initial credit for port 1 in the tx port buffers in 16 byte
1783 lines. */
1784#define PBF_REG_P1_INIT_CRD 0x1400d4
1785/* [R 8] Number of tasks in port 1 task queue. */
1786#define PBF_REG_P1_TASK_CNT 0x14020c
1787/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */
1788#define PBF_REG_P4_CREDIT 0x140210
1789/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
1790 lines. */
1791#define PBF_REG_P4_INIT_CRD 0x1400e0
1792/* [R 8] Number of tasks in port 4 task queue. */
1793#define PBF_REG_P4_TASK_CNT 0x140214
1794/* [RW 5] Interrupt mask register #0 read/write */
1795#define PBF_REG_PBF_INT_MASK 0x1401d4
1796/* [R 5] Interrupt register #0 read */
1797#define PBF_REG_PBF_INT_STS 0x1401c8
1798#define PB_REG_CONTROL 0
1799/* [RW 2] Interrupt mask register #0 read/write */
1800#define PB_REG_PB_INT_MASK 0x28
1801/* [R 2] Interrupt register #0 read */
1802#define PB_REG_PB_INT_STS 0x1c
1803/* [RW 4] Parity mask register #0 read/write */
1804#define PB_REG_PB_PRTY_MASK 0x38
1805/* [R 4] Parity register #0 read */
1806#define PB_REG_PB_PRTY_STS 0x2c
1807#define PRS_REG_A_PRSU_20 0x40134
1808/* [R 8] debug only: CFC load request current credit. Transaction based. */
1809#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164
1810/* [R 8] debug only: CFC search request current credit. Transaction based. */
1811#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT 0x40168
1812/* [RW 6] The initial credit for the search message to the CFC interface.
1813 Credit is transaction based. */
1814#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c
1815/* [RW 24] CID for port 0 if no match */
1816#define PRS_REG_CID_PORT_0 0x400fc
1817/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
1818 load response is reset and packet type is 0. Used in packet start message
1819 to TCM. */
1820#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0 0x400dc
1821#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1 0x400e0
1822#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2 0x400e4
1823#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3 0x400e8
1824#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4 0x400ec
1825#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5 0x400f0
1826/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
1827 load response is set and packet type is 0. Used in packet start message
1828 to TCM. */
1829#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0 0x400bc
1830#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1 0x400c0
1831#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2 0x400c4
1832#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3 0x400c8
1833#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4 0x400cc
1834#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5 0x400d0
1835/* [RW 32] The CM header for a match and packet type 1 for loopback port.
1836 Used in packet start message to TCM. */
1837#define PRS_REG_CM_HDR_LOOPBACK_TYPE_1 0x4009c
1838#define PRS_REG_CM_HDR_LOOPBACK_TYPE_2 0x400a0
1839#define PRS_REG_CM_HDR_LOOPBACK_TYPE_3 0x400a4
1840#define PRS_REG_CM_HDR_LOOPBACK_TYPE_4 0x400a8
1841/* [RW 32] The CM header for a match and packet type 0. Used in packet start
1842 message to TCM. */
1843#define PRS_REG_CM_HDR_TYPE_0 0x40078
1844#define PRS_REG_CM_HDR_TYPE_1 0x4007c
1845#define PRS_REG_CM_HDR_TYPE_2 0x40080
1846#define PRS_REG_CM_HDR_TYPE_3 0x40084
1847#define PRS_REG_CM_HDR_TYPE_4 0x40088
1848/* [RW 32] The CM header in case there was not a match on the connection */
1849#define PRS_REG_CM_NO_MATCH_HDR 0x400b8
1850/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */
1851#define PRS_REG_E1HOV_MODE 0x401c8
1852/* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet
1853 start message to TCM. */
1854#define PRS_REG_EVENT_ID_1 0x40054
1855#define PRS_REG_EVENT_ID_2 0x40058
1856#define PRS_REG_EVENT_ID_3 0x4005c
1857/* [RW 16] The Ethernet type value for FCoE */
1858#define PRS_REG_FCOE_TYPE 0x401d0
1859/* [RW 8] Context region for flush packet with packet type 0. Used in CFC
1860 load request message. */
1861#define PRS_REG_FLUSH_REGIONS_TYPE_0 0x40004
1862#define PRS_REG_FLUSH_REGIONS_TYPE_1 0x40008
1863#define PRS_REG_FLUSH_REGIONS_TYPE_2 0x4000c
1864#define PRS_REG_FLUSH_REGIONS_TYPE_3 0x40010
1865#define PRS_REG_FLUSH_REGIONS_TYPE_4 0x40014
1866#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018
1867#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c
1868#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020
1869/* [RW 4] The increment value to send in the CFC load request message */
1870#define PRS_REG_INC_VALUE 0x40048
1871/* [RW 1] If set indicates not to send messages to CFC on received packets */
1872#define PRS_REG_NIC_MODE 0x40138
1873/* [RW 8] The 8-bit event ID for cases where there is no match on the
1874 connection. Used in packet start message to TCM. */
1875#define PRS_REG_NO_MATCH_EVENT_ID 0x40070
1876/* [ST 24] The number of input CFC flush packets */
1877#define PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES 0x40128
1878/* [ST 32] The number of cycles the Parser halted its operation since it
1879 could not allocate the next serial number */
1880#define PRS_REG_NUM_OF_DEAD_CYCLES 0x40130
1881/* [ST 24] The number of input packets */
1882#define PRS_REG_NUM_OF_PACKETS 0x40124
1883/* [ST 24] The number of input transparent flush packets */
1884#define PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES 0x4012c
1885/* [RW 8] Context region for received Ethernet packet with a match and
1886 packet type 0. Used in CFC load request message */
1887#define PRS_REG_PACKET_REGIONS_TYPE_0 0x40028
1888#define PRS_REG_PACKET_REGIONS_TYPE_1 0x4002c
1889#define PRS_REG_PACKET_REGIONS_TYPE_2 0x40030
1890#define PRS_REG_PACKET_REGIONS_TYPE_3 0x40034
1891#define PRS_REG_PACKET_REGIONS_TYPE_4 0x40038
1892#define PRS_REG_PACKET_REGIONS_TYPE_5 0x4003c
1893#define PRS_REG_PACKET_REGIONS_TYPE_6 0x40040
1894#define PRS_REG_PACKET_REGIONS_TYPE_7 0x40044
1895/* [R 2] debug only: Number of pending requests for CAC on port 0. */
1896#define PRS_REG_PENDING_BRB_CAC0_RQ 0x40174
1897/* [R 2] debug only: Number of pending requests for header parsing. */
1898#define PRS_REG_PENDING_BRB_PRS_RQ 0x40170
1899/* [R 1] Interrupt register #0 read */
1900#define PRS_REG_PRS_INT_STS 0x40188
1901/* [RW 8] Parity mask register #0 read/write */
1902#define PRS_REG_PRS_PRTY_MASK 0x401a4
1903/* [R 8] Parity register #0 read */
1904#define PRS_REG_PRS_PRTY_STS 0x40198
1905/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
1906 request message */
1907#define PRS_REG_PURE_REGIONS 0x40024
1908/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this
1909   serial number was released by SDM but cannot be used because a previous
1910 serial number was not released. */
1911#define PRS_REG_SERIAL_NUM_STATUS_LSB 0x40154
1912/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this
1913   serial number was released by SDM but cannot be used because a previous
1914 serial number was not released. */
1915#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158
1916/* [R 4] debug only: SRC current credit. Transaction based. */
1917#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c
1918/* [R 8] debug only: TCM current credit. Cycle based. */
1919#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
1920/* [R 8] debug only: TSDM current credit. Transaction based. */
1921#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
1922/* [R 6] Debug only: Number of used entries in the data FIFO */
1923#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
1924/* [R 7] Debug only: Number of used entries in the header FIFO */
1925#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
1926#define PXP2_REG_PGL_ADDR_88_F0 0x120534
1927#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
1928#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
1929#define PXP2_REG_PGL_ADDR_94_F0 0x120540
1930#define PXP2_REG_PGL_CONTROL0 0x120490
1931#define PXP2_REG_PGL_CONTROL1 0x120514
1932#define PXP2_REG_PGL_DEBUG 0x120520
1933/* [RW 32] third dword data of expansion rom request. this register is
1934   special. reading from it provides a vector of outstanding read requests.
1935   if a bit is zero it means that a read request on the corresponding tag
1936   did not finish yet (not all completions have arrived for it) */
1937#define PXP2_REG_PGL_EXP_ROM2 0x120808
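/* A minimal sketch of using the register above as a completion fence for
 * outstanding expansion-ROM read tags, assuming the driver's REG_RD() helper,
 * <linux/delay.h> and <linux/errno.h>; the helper name and the poll bound are
 * illustrative only. Per the comment above, a clear bit means the tag is
 * still pending, so all-ones means every outstanding read has completed.
 */
static inline int bnx2x_wait_pgl_exp_rom2_idle(struct bnx2x *bp)
{
	int count = 1000;		/* arbitrary poll bound */

	while (count--) {
		if (REG_RD(bp, PXP2_REG_PGL_EXP_ROM2) == 0xffffffff)
			return 0;	/* no read request pending */
		msleep(1);
	}
	return -EBUSY;			/* some tag never completed */
}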
1938/* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask;
1939   bits[15:0]-address */
1940#define PXP2_REG_PGL_INT_CSDM_0 0x1204f4
1941#define PXP2_REG_PGL_INT_CSDM_1 0x1204f8
1942#define PXP2_REG_PGL_INT_CSDM_2 0x1204fc
1943#define PXP2_REG_PGL_INT_CSDM_3 0x120500
1944#define PXP2_REG_PGL_INT_CSDM_4 0x120504
1945#define PXP2_REG_PGL_INT_CSDM_5 0x120508
1946#define PXP2_REG_PGL_INT_CSDM_6 0x12050c
1947#define PXP2_REG_PGL_INT_CSDM_7 0x120510
1948/* [RW 32] Inbound interrupt table for TSDM: bits[31:16]-mask;
1949   bits[15:0]-address */
1950#define PXP2_REG_PGL_INT_TSDM_0 0x120494
1951#define PXP2_REG_PGL_INT_TSDM_1 0x120498
1952#define PXP2_REG_PGL_INT_TSDM_2 0x12049c
1953#define PXP2_REG_PGL_INT_TSDM_3 0x1204a0
1954#define PXP2_REG_PGL_INT_TSDM_4 0x1204a4
1955#define PXP2_REG_PGL_INT_TSDM_5 0x1204a8
1956#define PXP2_REG_PGL_INT_TSDM_6 0x1204ac
1957#define PXP2_REG_PGL_INT_TSDM_7 0x1204b0
1958/* [RW 32] Inbound interrupt table for USDM: bits[31:16]-mask;
1959   bits[15:0]-address */
1960#define PXP2_REG_PGL_INT_USDM_0 0x1204b4
1961#define PXP2_REG_PGL_INT_USDM_1 0x1204b8
1962#define PXP2_REG_PGL_INT_USDM_2 0x1204bc
1963#define PXP2_REG_PGL_INT_USDM_3 0x1204c0
1964#define PXP2_REG_PGL_INT_USDM_4 0x1204c4
1965#define PXP2_REG_PGL_INT_USDM_5 0x1204c8
1966#define PXP2_REG_PGL_INT_USDM_6 0x1204cc
1967#define PXP2_REG_PGL_INT_USDM_7 0x1204d0
1968/* [RW 32] Inbound interrupt table for XSDM: bits[31:16]-mask;
1969   bits[15:0]-address */
1970#define PXP2_REG_PGL_INT_XSDM_0 0x1204d4
1971#define PXP2_REG_PGL_INT_XSDM_1 0x1204d8
1972#define PXP2_REG_PGL_INT_XSDM_2 0x1204dc
1973#define PXP2_REG_PGL_INT_XSDM_3 0x1204e0
1974#define PXP2_REG_PGL_INT_XSDM_4 0x1204e4
1975#define PXP2_REG_PGL_INT_XSDM_5 0x1204e8
1976#define PXP2_REG_PGL_INT_XSDM_6 0x1204ec
1977#define PXP2_REG_PGL_INT_XSDM_7 0x1204f0
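/* The four PGL_INT_*SDM tables above share one entry layout: bits[31:16] hold
 * the mask and bits[15:0] hold the address. A small sketch of composing such
 * an entry, assuming the driver's REG_WR() helper; the helper name and the
 * mask/addr parameters are illustrative.
 */
static inline void bnx2x_pgl_int_set(struct bnx2x *bp, u32 reg,
				     u16 mask, u16 addr)
{
	REG_WR(bp, reg, ((u32)mask << 16) | addr);
}
/* e.g. bnx2x_pgl_int_set(bp, PXP2_REG_PGL_INT_TSDM_0, mask, addr); */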
1978/* [RW 3] this field allows one function to pretend to be another function
1979   when accessing any BAR mapped resource within the device. the value of
1980   the field is the number of the function that will effectively be
1981   accessed. after software writes to this field it must read it back in
1982   order to know that the new value has taken effect */
1983#define PXP2_REG_PGL_PRETEND_FUNC_F0 0x120674
1984#define PXP2_REG_PGL_PRETEND_FUNC_F1 0x120678
1985#define PXP2_REG_PGL_PRETEND_FUNC_F2 0x12067c
1986#define PXP2_REG_PGL_PRETEND_FUNC_F3 0x120680
1987#define PXP2_REG_PGL_PRETEND_FUNC_F4 0x120684
1988#define PXP2_REG_PGL_PRETEND_FUNC_F5 0x120688
1989#define PXP2_REG_PGL_PRETEND_FUNC_F6 0x12068c
1990#define PXP2_REG_PGL_PRETEND_FUNC_F7 0x120690
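/* A hedged sketch of the access pattern the comment above requires: write the
 * pretend value, then read the register back so that later BAR accesses are
 * guaranteed to be issued on behalf of 'func'. REG_WR()/REG_RD() are the
 * driver's register helpers; the helper name is illustrative and F0 is used
 * only as an example - the register matching the issuing PCI function would
 * be selected in practice.
 */
static inline void bnx2x_pretend_as(struct bnx2x *bp, u8 func)
{
	REG_WR(bp, PXP2_REG_PGL_PRETEND_FUNC_F0, func);
	REG_RD(bp, PXP2_REG_PGL_PRETEND_FUNC_F0);	/* flush the write */
}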
1991/* [R 1] this bit indicates that a read request was blocked because
1992   bus_master_en was deasserted */
1993#define PXP2_REG_PGL_READ_BLOCKED 0x120568
1994#define PXP2_REG_PGL_TAGS_LIMIT 0x1205a8
1995/* [R 18] debug only */
1996#define PXP2_REG_PGL_TXW_CDTS 0x12052c
1997/* [R 1] this bit indicates that a write request was blocked because
1998   bus_master_en was deasserted */
1999#define PXP2_REG_PGL_WRITE_BLOCKED 0x120564
2000#define PXP2_REG_PSWRQ_BW_ADD1 0x1201c0
2001#define PXP2_REG_PSWRQ_BW_ADD10 0x1201e4
2002#define PXP2_REG_PSWRQ_BW_ADD11 0x1201e8
2003#define PXP2_REG_PSWRQ_BW_ADD2 0x1201c4
2004#define PXP2_REG_PSWRQ_BW_ADD28 0x120228
2005#define PXP2_REG_PSWRQ_BW_ADD3 0x1201c8
2006#define PXP2_REG_PSWRQ_BW_ADD6 0x1201d4
2007#define PXP2_REG_PSWRQ_BW_ADD7 0x1201d8
2008#define PXP2_REG_PSWRQ_BW_ADD8 0x1201dc
2009#define PXP2_REG_PSWRQ_BW_ADD9 0x1201e0
2010#define PXP2_REG_PSWRQ_BW_CREDIT 0x12032c
2011#define PXP2_REG_PSWRQ_BW_L1 0x1202b0
2012#define PXP2_REG_PSWRQ_BW_L10 0x1202d4
2013#define PXP2_REG_PSWRQ_BW_L11 0x1202d8
2014#define PXP2_REG_PSWRQ_BW_L2 0x1202b4
2015#define PXP2_REG_PSWRQ_BW_L28 0x120318
2016#define PXP2_REG_PSWRQ_BW_L3 0x1202b8
2017#define PXP2_REG_PSWRQ_BW_L6 0x1202c4
2018#define PXP2_REG_PSWRQ_BW_L7 0x1202c8
2019#define PXP2_REG_PSWRQ_BW_L8 0x1202cc
2020#define PXP2_REG_PSWRQ_BW_L9 0x1202d0
2021#define PXP2_REG_PSWRQ_BW_RD 0x120324
2022#define PXP2_REG_PSWRQ_BW_UB1 0x120238
2023#define PXP2_REG_PSWRQ_BW_UB10 0x12025c
2024#define PXP2_REG_PSWRQ_BW_UB11 0x120260
2025#define PXP2_REG_PSWRQ_BW_UB2 0x12023c
2026#define PXP2_REG_PSWRQ_BW_UB28 0x1202a0
2027#define PXP2_REG_PSWRQ_BW_UB3 0x120240
2028#define PXP2_REG_PSWRQ_BW_UB6 0x12024c
2029#define PXP2_REG_PSWRQ_BW_UB7 0x120250
2030#define PXP2_REG_PSWRQ_BW_UB8 0x120254
2031#define PXP2_REG_PSWRQ_BW_UB9 0x120258
2032#define PXP2_REG_PSWRQ_BW_WR 0x120328
2033#define PXP2_REG_PSWRQ_CDU0_L2P 0x120000
2034#define PXP2_REG_PSWRQ_QM0_L2P 0x120038
2035#define PXP2_REG_PSWRQ_SRC0_L2P 0x120054
2036#define PXP2_REG_PSWRQ_TM0_L2P 0x12001c
2037#define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0
2038/* [RW 32] Interrupt mask register #0 read/write */
2039#define PXP2_REG_PXP2_INT_MASK_0 0x120578
2040/* [R 32] Interrupt register #0 read */
2041#define PXP2_REG_PXP2_INT_STS_0 0x12056c
2042#define PXP2_REG_PXP2_INT_STS_1 0x120608
2043/* [RC 32] Interrupt register #0 read clear */
2044#define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570
2045/* [RW 32] Parity mask register #0 read/write */
2046#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588
2047#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598
2048/* [R 32] Parity register #0 read */
2049#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
2050#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
2051/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
2052 indication about backpressure) */
2053#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
2054/* [R 8] Debug only: The blocks counter - number of unused block ids */
2055#define PXP2_REG_RD_BLK_CNT 0x120418
2056/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer.
2057 Must be bigger than 6. Normally should not be changed. */
2058#define PXP2_REG_RD_BLK_NUM_CFG 0x12040c
2059/* [RW 2] CDU byte swapping mode configuration for master read requests */
2060#define PXP2_REG_RD_CDURD_SWAP_MODE 0x120404
2061/* [RW 1] When '1'; inputs to the PSWRD block are ignored */
2062#define PXP2_REG_RD_DISABLE_INPUTS 0x120374
2063/* [R 1] PSWRD internal memories initialization is done */
2064#define PXP2_REG_RD_INIT_DONE 0x120370
2065/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2066 allocated for vq10 */
2067#define PXP2_REG_RD_MAX_BLKS_VQ10 0x1203a0
2068/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2069 allocated for vq11 */
2070#define PXP2_REG_RD_MAX_BLKS_VQ11 0x1203a4
2071/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2072 allocated for vq17 */
2073#define PXP2_REG_RD_MAX_BLKS_VQ17 0x1203bc
2074/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2075 allocated for vq18 */
2076#define PXP2_REG_RD_MAX_BLKS_VQ18 0x1203c0
2077/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2078 allocated for vq19 */
2079#define PXP2_REG_RD_MAX_BLKS_VQ19 0x1203c4
2080/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2081 allocated for vq22 */
2082#define PXP2_REG_RD_MAX_BLKS_VQ22 0x1203d0
2083/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2084 allocated for vq25 */
2085#define PXP2_REG_RD_MAX_BLKS_VQ25 0x1203dc
2086/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2087 allocated for vq6 */
2088#define PXP2_REG_RD_MAX_BLKS_VQ6 0x120390
2089/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
2090 allocated for vq9 */
2091#define PXP2_REG_RD_MAX_BLKS_VQ9 0x12039c
2092/* [RW 2] PBF byte swapping mode configuration for master read requests */
2093#define PXP2_REG_RD_PBF_SWAP_MODE 0x1203f4
2094/* [R 1] Debug only: Indication if delivery ports are idle */
2095#define PXP2_REG_RD_PORT_IS_IDLE_0 0x12041c
2096#define PXP2_REG_RD_PORT_IS_IDLE_1 0x120420
2097/* [RW 2] QM byte swapping mode configuration for master read requests */
2098#define PXP2_REG_RD_QM_SWAP_MODE 0x1203f8
2099/* [R 7] Debug only: The SR counter - number of unused sub request ids */
2100#define PXP2_REG_RD_SR_CNT 0x120414
2101/* [RW 2] SRC byte swapping mode configuration for master read requests */
2102#define PXP2_REG_RD_SRC_SWAP_MODE 0x120400
2103/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must
2104 be bigger than 1. Normally should not be changed. */
2105#define PXP2_REG_RD_SR_NUM_CFG 0x120408
2106/* [RW 1] Signals the PSWRD block to start initializing internal memories */
2107#define PXP2_REG_RD_START_INIT 0x12036c
2108/* [RW 2] TM byte swapping mode configuration for master read requests */
2109#define PXP2_REG_RD_TM_SWAP_MODE 0x1203fc
2110/* [RW 10] Bandwidth addition to VQ0 read requests */
2111#define PXP2_REG_RQ_BW_RD_ADD0 0x1201bc
2112/* [RW 10] Bandwidth addition to VQ12 read requests */
2113#define PXP2_REG_RQ_BW_RD_ADD12 0x1201ec
2114/* [RW 10] Bandwidth addition to VQ13 read requests */
2115#define PXP2_REG_RQ_BW_RD_ADD13 0x1201f0
2116/* [RW 10] Bandwidth addition to VQ14 read requests */
2117#define PXP2_REG_RQ_BW_RD_ADD14 0x1201f4
2118/* [RW 10] Bandwidth addition to VQ15 read requests */
2119#define PXP2_REG_RQ_BW_RD_ADD15 0x1201f8
2120/* [RW 10] Bandwidth addition to VQ16 read requests */
2121#define PXP2_REG_RQ_BW_RD_ADD16 0x1201fc
2122/* [RW 10] Bandwidth addition to VQ17 read requests */
2123#define PXP2_REG_RQ_BW_RD_ADD17 0x120200
2124/* [RW 10] Bandwidth addition to VQ18 read requests */
2125#define PXP2_REG_RQ_BW_RD_ADD18 0x120204
2126/* [RW 10] Bandwidth addition to VQ19 read requests */
2127#define PXP2_REG_RQ_BW_RD_ADD19 0x120208
2128/* [RW 10] Bandwidth addition to VQ20 read requests */
2129#define PXP2_REG_RQ_BW_RD_ADD20 0x12020c
2130/* [RW 10] Bandwidth addition to VQ22 read requests */
2131#define PXP2_REG_RQ_BW_RD_ADD22 0x120210
2132/* [RW 10] Bandwidth addition to VQ23 read requests */
2133#define PXP2_REG_RQ_BW_RD_ADD23 0x120214
2134/* [RW 10] Bandwidth addition to VQ24 read requests */
2135#define PXP2_REG_RQ_BW_RD_ADD24 0x120218
2136/* [RW 10] Bandwidth addition to VQ25 read requests */
2137#define PXP2_REG_RQ_BW_RD_ADD25 0x12021c
2138/* [RW 10] Bandwidth addition to VQ26 read requests */
2139#define PXP2_REG_RQ_BW_RD_ADD26 0x120220
2140/* [RW 10] Bandwidth addition to VQ27 read requests */
2141#define PXP2_REG_RQ_BW_RD_ADD27 0x120224
2142/* [RW 10] Bandwidth addition to VQ4 read requests */
2143#define PXP2_REG_RQ_BW_RD_ADD4 0x1201cc
2144/* [RW 10] Bandwidth addition to VQ5 read requests */
2145#define PXP2_REG_RQ_BW_RD_ADD5 0x1201d0
2146/* [RW 10] Bandwidth Typical L for VQ0 Read requests */
2147#define PXP2_REG_RQ_BW_RD_L0 0x1202ac
2148/* [RW 10] Bandwidth Typical L for VQ12 Read requests */
2149#define PXP2_REG_RQ_BW_RD_L12 0x1202dc
2150/* [RW 10] Bandwidth Typical L for VQ13 Read requests */
2151#define PXP2_REG_RQ_BW_RD_L13 0x1202e0
2152/* [RW 10] Bandwidth Typical L for VQ14 Read requests */
2153#define PXP2_REG_RQ_BW_RD_L14 0x1202e4
2154/* [RW 10] Bandwidth Typical L for VQ15 Read requests */
2155#define PXP2_REG_RQ_BW_RD_L15 0x1202e8
2156/* [RW 10] Bandwidth Typical L for VQ16 Read requests */
2157#define PXP2_REG_RQ_BW_RD_L16 0x1202ec
2158/* [RW 10] Bandwidth Typical L for VQ17 Read requests */
2159#define PXP2_REG_RQ_BW_RD_L17 0x1202f0
2160/* [RW 10] Bandwidth Typical L for VQ18 Read requests */
2161#define PXP2_REG_RQ_BW_RD_L18 0x1202f4
2162/* [RW 10] Bandwidth Typical L for VQ19 Read requests */
2163#define PXP2_REG_RQ_BW_RD_L19 0x1202f8
2164/* [RW 10] Bandwidth Typical L for VQ20 Read requests */
2165#define PXP2_REG_RQ_BW_RD_L20 0x1202fc
2166/* [RW 10] Bandwidth Typical L for VQ22 Read requests */
2167#define PXP2_REG_RQ_BW_RD_L22 0x120300
2168/* [RW 10] Bandwidth Typical L for VQ23 Read requests */
2169#define PXP2_REG_RQ_BW_RD_L23 0x120304
2170/* [RW 10] Bandwidth Typical L for VQ24 Read requests */
2171#define PXP2_REG_RQ_BW_RD_L24 0x120308
2172/* [RW 10] Bandwidth Typical L for VQ25 Read requests */
2173#define PXP2_REG_RQ_BW_RD_L25 0x12030c
2174/* [RW 10] Bandwidth Typical L for VQ26 Read requests */
2175#define PXP2_REG_RQ_BW_RD_L26 0x120310
2176/* [RW 10] Bandwidth Typical L for VQ27 Read requests */
2177#define PXP2_REG_RQ_BW_RD_L27 0x120314
2178/* [RW 10] Bandwidth Typical L for VQ4 Read requests */
2179#define PXP2_REG_RQ_BW_RD_L4 0x1202bc
2180/* [RW 10] Bandwidth Typical L for VQ5 Read - currently not used */
2181#define PXP2_REG_RQ_BW_RD_L5 0x1202c0
2182/* [RW 7] Bandwidth upper bound for VQ0 read requests */
2183#define PXP2_REG_RQ_BW_RD_UBOUND0 0x120234
2184/* [RW 7] Bandwidth upper bound for VQ12 read requests */
2185#define PXP2_REG_RQ_BW_RD_UBOUND12 0x120264
2186/* [RW 7] Bandwidth upper bound for VQ13 read requests */
2187#define PXP2_REG_RQ_BW_RD_UBOUND13 0x120268
2188/* [RW 7] Bandwidth upper bound for VQ14 read requests */
2189#define PXP2_REG_RQ_BW_RD_UBOUND14 0x12026c
2190/* [RW 7] Bandwidth upper bound for VQ15 read requests */
2191#define PXP2_REG_RQ_BW_RD_UBOUND15 0x120270
2192/* [RW 7] Bandwidth upper bound for VQ16 read requests */
2193#define PXP2_REG_RQ_BW_RD_UBOUND16 0x120274
2194/* [RW 7] Bandwidth upper bound for VQ17 read requests */
2195#define PXP2_REG_RQ_BW_RD_UBOUND17 0x120278
2196/* [RW 7] Bandwidth upper bound for VQ18 read requests */
2197#define PXP2_REG_RQ_BW_RD_UBOUND18 0x12027c
2198/* [RW 7] Bandwidth upper bound for VQ19 read requests */
2199#define PXP2_REG_RQ_BW_RD_UBOUND19 0x120280
2200/* [RW 7] Bandwidth upper bound for VQ20 read requests */
2201#define PXP2_REG_RQ_BW_RD_UBOUND20 0x120284
2202/* [RW 7] Bandwidth upper bound for VQ22 read requests */
2203#define PXP2_REG_RQ_BW_RD_UBOUND22 0x120288
2204/* [RW 7] Bandwidth upper bound for VQ23 read requests */
2205#define PXP2_REG_RQ_BW_RD_UBOUND23 0x12028c
2206/* [RW 7] Bandwidth upper bound for VQ24 read requests */
2207#define PXP2_REG_RQ_BW_RD_UBOUND24 0x120290
2208/* [RW 7] Bandwidth upper bound for VQ25 read requests */
2209#define PXP2_REG_RQ_BW_RD_UBOUND25 0x120294
2210/* [RW 7] Bandwidth upper bound for VQ26 read requests */
2211#define PXP2_REG_RQ_BW_RD_UBOUND26 0x120298
2212/* [RW 7] Bandwidth upper bound for VQ27 read requests */
2213#define PXP2_REG_RQ_BW_RD_UBOUND27 0x12029c
2214/* [RW 7] Bandwidth upper bound for VQ4 read requests */
2215#define PXP2_REG_RQ_BW_RD_UBOUND4 0x120244
2216/* [RW 7] Bandwidth upper bound for VQ5 read requests */
2217#define PXP2_REG_RQ_BW_RD_UBOUND5 0x120248
2218/* [RW 10] Bandwidth addition to VQ29 write requests */
2219#define PXP2_REG_RQ_BW_WR_ADD29 0x12022c
2220/* [RW 10] Bandwidth addition to VQ30 write requests */
2221#define PXP2_REG_RQ_BW_WR_ADD30 0x120230
2222/* [RW 10] Bandwidth Typical L for VQ29 Write requests */
2223#define PXP2_REG_RQ_BW_WR_L29 0x12031c
2224/* [RW 10] Bandwidth Typical L for VQ30 Write requests */
2225#define PXP2_REG_RQ_BW_WR_L30 0x120320
2226/* [RW 7] Bandwidth upper bound for VQ29 */
2227#define PXP2_REG_RQ_BW_WR_UBOUND29 0x1202a4
2228/* [RW 7] Bandwidth upper bound for VQ30 */
2229#define PXP2_REG_RQ_BW_WR_UBOUND30 0x1202a8
2230/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */
2231#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR 0x120008
2232/* [RW 2] Endian mode for cdu */
2233#define PXP2_REG_RQ_CDU_ENDIAN_M 0x1201a0
2234#define PXP2_REG_RQ_CDU_FIRST_ILT 0x12061c
2235#define PXP2_REG_RQ_CDU_LAST_ILT 0x120620
2236/* [RW 3] page size in L2P table for CDU module; -4k; -8k; -16k; -32k; -64k;
2237 -128k */
2238#define PXP2_REG_RQ_CDU_P_SIZE 0x120018
2239/* [R 1] '1' indicates that the requester has finished its internal
2240 configuration */
2241#define PXP2_REG_RQ_CFG_DONE 0x1201b4
2242/* [RW 2] Endian mode for debug */
2243#define PXP2_REG_RQ_DBG_ENDIAN_M 0x1201a4
2244/* [RW 1] When '1'; requests will enter input buffers but won't get out
2245 towards the glue */
2246#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330
2247/* [RW 1] 1 - SR will be aligned by 64B; 0 - SR will be aligned by 8B */
2248#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0
2249/* [RW 1] If 1 an ILT failure will not result in ELT access; an interrupt
2250   will be asserted */
2251#define PXP2_REG_RQ_ELT_DISABLE 0x12066c
2252/* [RW 2] Endian mode for hc */
2253#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8
2254/* [RW 1] when '0' ILT logic will work as in A0; otherwise B0; for backward
2255   compatibility; Note that different registers are used per mode */
2256#define PXP2_REG_RQ_ILT_MODE 0x1205b4
2257/* [WB 53] Onchip address table */
2258#define PXP2_REG_RQ_ONCHIP_AT 0x122000
2259/* [WB 53] Onchip address table - B0 */
2260#define PXP2_REG_RQ_ONCHIP_AT_B0 0x128000
2261/* [RW 13] Pending read limiter threshold; in Dwords */
2262#define PXP2_REG_RQ_PDR_LIMIT 0x12033c
2263/* [RW 2] Endian mode for qm */
2264#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194
2265#define PXP2_REG_RQ_QM_FIRST_ILT 0x120634
2266#define PXP2_REG_RQ_QM_LAST_ILT 0x120638
2267/* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k;
2268 -128k */
2269#define PXP2_REG_RQ_QM_P_SIZE 0x120050
2270/* [RW 1] '1' indicates that the RBC has finished configuring the PSWRQ */
2271#define PXP2_REG_RQ_RBC_DONE 0x1201b0
2272/* [RW 3] Max burst size field for read requests port 0; 000 - 128B;
2273   001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
2274#define PXP2_REG_RQ_RD_MBS0 0x120160
2275/* [RW 3] Max burst size field for read requests port 1; 000 - 128B;
2276   001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
2277#define PXP2_REG_RQ_RD_MBS1 0x120168
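/* Assuming the 3-bit MBS code is log2(burst size / 128B), as the
 * 128B/256B/512B/1K/2K/4K progression above suggests (an inference from the
 * comment, not a statement of the hardware spec), a small conversion sketch;
 * the helper name is illustrative.
 */
static inline u8 bnx2x_mbs_code(u32 bytes)
{
	u8 code = 0;

	while (bytes > 128 && code < 7) {	/* 128B -> 0, 256B -> 1, ... */
		bytes >>= 1;
		code++;
	}
	return code;
}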
2278/* [RW 2] Endian mode for src */
2279#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c
2280#define PXP2_REG_RQ_SRC_FIRST_ILT 0x12063c
2281#define PXP2_REG_RQ_SRC_LAST_ILT 0x120640
2282/* [RW 3] page size in L2P table for SRC module; -4k; -8k; -16k; -32k; -64k;
2283 -128k */
2284#define PXP2_REG_RQ_SRC_P_SIZE 0x12006c
2285/* [RW 2] Endian mode for tm */
2286#define PXP2_REG_RQ_TM_ENDIAN_M 0x120198
2287#define PXP2_REG_RQ_TM_FIRST_ILT 0x120644
2288#define PXP2_REG_RQ_TM_LAST_ILT 0x120648
2289/* [RW 3] page size in L2P table for TM module; -4k; -8k; -16k; -32k; -64k;
2290 -128k */
2291#define PXP2_REG_RQ_TM_P_SIZE 0x120034
2292/* [R 5] Number of entries in the ufifo; this fifo has l2p completions */
2293#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY 0x12080c
2294/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */
2295#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR 0x120094
2296/* [R 8] Number of entries occupied by vq 0 in pswrq memory */
2297#define PXP2_REG_RQ_VQ0_ENTRY_CNT 0x120810
2298/* [R 8] Number of entries occupied by vq 10 in pswrq memory */
2299#define PXP2_REG_RQ_VQ10_ENTRY_CNT 0x120818
2300/* [R 8] Number of entries occupied by vq 11 in pswrq memory */
2301#define PXP2_REG_RQ_VQ11_ENTRY_CNT 0x120820
2302/* [R 8] Number of entries occupied by vq 12 in pswrq memory */
2303#define PXP2_REG_RQ_VQ12_ENTRY_CNT 0x120828
2304/* [R 8] Number of entries occupied by vq 13 in pswrq memory */
2305#define PXP2_REG_RQ_VQ13_ENTRY_CNT 0x120830
2306/* [R 8] Number of entries occupied by vq 14 in pswrq memory */
2307#define PXP2_REG_RQ_VQ14_ENTRY_CNT 0x120838
2308/* [R 8] Number of entries occupied by vq 15 in pswrq memory */
2309#define PXP2_REG_RQ_VQ15_ENTRY_CNT 0x120840
2310/* [R 8] Number of entries occupied by vq 16 in pswrq memory */
2311#define PXP2_REG_RQ_VQ16_ENTRY_CNT 0x120848
2312/* [R 8] Number of entries occupied by vq 17 in pswrq memory */
2313#define PXP2_REG_RQ_VQ17_ENTRY_CNT 0x120850
2314/* [R 8] Number of entries occupied by vq 18 in pswrq memory */
2315#define PXP2_REG_RQ_VQ18_ENTRY_CNT 0x120858
2316/* [R 8] Number of entries occupied by vq 19 in pswrq memory */
2317#define PXP2_REG_RQ_VQ19_ENTRY_CNT 0x120860
2318/* [R 8] Number of entries occupied by vq 1 in pswrq memory */
2319#define PXP2_REG_RQ_VQ1_ENTRY_CNT 0x120868
2320/* [R 8] Number of entries occupied by vq 20 in pswrq memory */
2321#define PXP2_REG_RQ_VQ20_ENTRY_CNT 0x120870
2322/* [R 8] Number of entries occupied by vq 21 in pswrq memory */
2323#define PXP2_REG_RQ_VQ21_ENTRY_CNT 0x120878
2324/* [R 8] Number of entries occupied by vq 22 in pswrq memory */
2325#define PXP2_REG_RQ_VQ22_ENTRY_CNT 0x120880
2326/* [R 8] Number of entries occupied by vq 23 in pswrq memory */
2327#define PXP2_REG_RQ_VQ23_ENTRY_CNT 0x120888
2328/* [R 8] Number of entries occupied by vq 24 in pswrq memory */
2329#define PXP2_REG_RQ_VQ24_ENTRY_CNT 0x120890
2330/* [R 8] Number of entries occupied by vq 25 in pswrq memory */
2331#define PXP2_REG_RQ_VQ25_ENTRY_CNT 0x120898
2332/* [R 8] Number of entries occupied by vq 26 in pswrq memory */
2333#define PXP2_REG_RQ_VQ26_ENTRY_CNT 0x1208a0
2334/* [R 8] Number of entries occupied by vq 27 in pswrq memory */
2335#define PXP2_REG_RQ_VQ27_ENTRY_CNT 0x1208a8
2336/* [R 8] Number of entries occupied by vq 28 in pswrq memory */
2337#define PXP2_REG_RQ_VQ28_ENTRY_CNT 0x1208b0
2338/* [R 8] Number of entries occupied by vq 29 in pswrq memory */
2339#define PXP2_REG_RQ_VQ29_ENTRY_CNT 0x1208b8
2340/* [R 8] Number of entries occupied by vq 2 in pswrq memory */
2341#define PXP2_REG_RQ_VQ2_ENTRY_CNT 0x1208c0
2342/* [R 8] Number of entries occupied by vq 30 in pswrq memory */
2343#define PXP2_REG_RQ_VQ30_ENTRY_CNT 0x1208c8
2344/* [R 8] Number of entries occupied by vq 31 in pswrq memory */
2345#define PXP2_REG_RQ_VQ31_ENTRY_CNT 0x1208d0
2346/* [R 8] Number of entries occupied by vq 3 in pswrq memory */
2347#define PXP2_REG_RQ_VQ3_ENTRY_CNT 0x1208d8
2348/* [R 8] Number of entries occupied by vq 4 in pswrq memory */
2349#define PXP2_REG_RQ_VQ4_ENTRY_CNT 0x1208e0
2350/* [R 8] Number of entries occupied by vq 5 in pswrq memory */
2351#define PXP2_REG_RQ_VQ5_ENTRY_CNT 0x1208e8
2352/* [R 8] Number of entries occupied by vq 6 in pswrq memory */
2353#define PXP2_REG_RQ_VQ6_ENTRY_CNT 0x1208f0
2354/* [R 8] Number of entries occupied by vq 7 in pswrq memory */
2355#define PXP2_REG_RQ_VQ7_ENTRY_CNT 0x1208f8
2356/* [R 8] Number of entries occupied by vq 8 in pswrq memory */
2357#define PXP2_REG_RQ_VQ8_ENTRY_CNT 0x120900
2358/* [R 8] Number of entries occupied by vq 9 in pswrq memory */
2359#define PXP2_REG_RQ_VQ9_ENTRY_CNT 0x120908
2360/* [RW 3] Max burst size field for write requests port 0; 000 - 128B;
2361   001 - 256B; 010 - 512B */
2362#define PXP2_REG_RQ_WR_MBS0 0x12015c
2363/* [RW 3] Max burst size field for write requests port 1; 000 - 128B;
2364   001 - 256B; 010 - 512B */
2365#define PXP2_REG_RQ_WR_MBS1 0x120164
2366/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2367 buffer reaches this number has_payload will be asserted */
2368#define PXP2_REG_WR_CDU_MPS 0x1205f0
2369/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2370 buffer reaches this number has_payload will be asserted */
2371#define PXP2_REG_WR_CSDM_MPS 0x1205d0
2372/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2373 buffer reaches this number has_payload will be asserted */
2374#define PXP2_REG_WR_DBG_MPS 0x1205e8
2375/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2376 buffer reaches this number has_payload will be asserted */
2377#define PXP2_REG_WR_DMAE_MPS 0x1205ec
2378/* [RW 10] If the number of entries in the dmae fifo is higher than this
2379   threshold then the has_payload indication will be asserted; the default
2380   value should be greater than the write MBS size */
2381#define PXP2_REG_WR_DMAE_TH 0x120368
2382/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2383 buffer reaches this number has_payload will be asserted */
2384#define PXP2_REG_WR_HC_MPS 0x1205c8
2385/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2386 buffer reaches this number has_payload will be asserted */
2387#define PXP2_REG_WR_QM_MPS 0x1205dc
2388/* [RW 1] 0 - working in A0 mode; 1 - working in B0 mode */
2389#define PXP2_REG_WR_REV_MODE 0x120670
2390/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2391 buffer reaches this number has_payload will be asserted */
2392#define PXP2_REG_WR_SRC_MPS 0x1205e4
2393/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2394 buffer reaches this number has_payload will be asserted */
2395#define PXP2_REG_WR_TM_MPS 0x1205e0
2396/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2397 buffer reaches this number has_payload will be asserted */
2398#define PXP2_REG_WR_TSDM_MPS 0x1205d4
2399/* [RW 10] If the number of entries in the usdmdp fifo is higher than this
2400   threshold then the has_payload indication will be asserted; the default
2401   value should be greater than the write MBS size */
2402#define PXP2_REG_WR_USDMDP_TH 0x120348
2403/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2404 buffer reaches this number has_payload will be asserted */
2405#define PXP2_REG_WR_USDM_MPS 0x1205cc
2406/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
2407 buffer reaches this number has_payload will be asserted */
2408#define PXP2_REG_WR_XSDM_MPS 0x1205d8
2409/* [R 1] debug only: Indication if PSWHST arbiter is idle */
2410#define PXP_REG_HST_ARB_IS_IDLE 0x103004
2411/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
2412 this client is waiting for the arbiter. */
2413#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
2414/* [RW 1] When 1; doorbells are discarded and not passed to the doorbell
2415   queue block. Should be used to close the gates. */
2416#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
2417/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
2418   should update according to 'hst_discard_doorbells' register when the state
2419 machine is idle */
2420#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
2421/* [RW 1] When 1; new internal writes arriving to the block are discarded.
2422   Should be used to close the gates. */
2423#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
2424/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
2425 means this PSWHST is discarding inputs from this client. Each bit should
2426   update according to 'hst_discard_internal_writes' register when the state
2427 machine is idle. */
2428#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
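/* A hedged sketch of the "close the gates" sequence described above: set both
 * discard controls, then wait for the status registers to reflect them (they
 * only update when the respective state machine is idle). The helper name,
 * the unbounded poll and the 0x3f "all six write clients discarding" value
 * are assumptions for illustration; REG_RD()/REG_WR() are the driver's
 * register helpers.
 */
static inline void bnx2x_pxp_close_gates(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, 1);
	REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, 1);

	while (REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS_STATUS) != 1 ||
	       REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS) != 0x3f)
		cpu_relax();	/* a real caller would bound this loop */
}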
2429/* [WB 160] Used for initialization of the inbound interrupts memory */
2430#define PXP_REG_HST_INBOUND_INT 0x103800
2431/* [RW 32] Interrupt mask register #0 read/write */
2432#define PXP_REG_PXP_INT_MASK_0 0x103074
2433#define PXP_REG_PXP_INT_MASK_1 0x103084
2434/* [R 32] Interrupt register #0 read */
2435#define PXP_REG_PXP_INT_STS_0 0x103068
2436#define PXP_REG_PXP_INT_STS_1 0x103078
2437/* [RC 32] Interrupt register #0 read clear */
2438#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c
2439/* [RW 26] Parity mask register #0 read/write */
2440#define PXP_REG_PXP_PRTY_MASK 0x103094
2441/* [R 26] Parity register #0 read */
2442#define PXP_REG_PXP_PRTY_STS 0x103088
2443/* [RW 4] The activity counter initial increment value sent in the load
2444 request */
2445#define QM_REG_ACTCTRINITVAL_0 0x168040
2446#define QM_REG_ACTCTRINITVAL_1 0x168044
2447#define QM_REG_ACTCTRINITVAL_2 0x168048
2448#define QM_REG_ACTCTRINITVAL_3 0x16804c
2449/* [RW 32] The base logical address (in bytes) of each physical queue. The
2450   index I represents the physical queue number. The 12 lsbs are ignored and
2451 considered zero so practically there are only 20 bits in this register;
2452 queues 63-0 */
2453#define QM_REG_BASEADDR 0x168900
2454/* [RW 32] The base logical address (in bytes) of each physical queue. The
2455   index I represents the physical queue number. The 12 lsbs are ignored and
2456 considered zero so practically there are only 20 bits in this register;
2457 queues 127-64 */
2458#define QM_REG_BASEADDR_EXT_A 0x16e100
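/* Because the 12 LSBs are ignored, each base address above is effectively 4KB
 * aligned. A trivial sketch of programming one entry; the helper name and the
 * 4-byte per-queue stride are assumptions for illustration, and REG_WR() is
 * the driver's register helper.
 */
static inline void bnx2x_qm_set_baseaddr(struct bnx2x *bp, int pq, u32 base)
{
	u32 reg = (pq < 64) ? QM_REG_BASEADDR + pq * 4 :
			      QM_REG_BASEADDR_EXT_A + (pq - 64) * 4;

	REG_WR(bp, reg, base & ~0xfffU);	/* force 4KB alignment */
}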
2459/* [RW 16] The byte credit cost for each task. This value is for both ports */
2460#define QM_REG_BYTECRDCOST 0x168234
2461/* [RW 16] The initial byte credit value for both ports. */
2462#define QM_REG_BYTECRDINITVAL 0x168238
2463/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
2464 queue uses port 0 else it uses port 1; queues 31-0 */
2465#define QM_REG_BYTECRDPORT_LSB 0x168228
2466/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
2467 queue uses port 0 else it uses port 1; queues 95-64 */
2468#define QM_REG_BYTECRDPORT_LSB_EXT_A 0x16e520
2469/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
2470 queue uses port 0 else it uses port 1; queues 63-32 */
2471#define QM_REG_BYTECRDPORT_MSB 0x168224
2472/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
2473 queue uses port 0 else it uses port 1; queues 127-96 */
2474#define QM_REG_BYTECRDPORT_MSB_EXT_A 0x16e51c
2475/* [RW 16] The byte credit value above which the QM is considered almost
2476 full */
2477#define QM_REG_BYTECREDITAFULLTHR 0x168094
2478/* [RW 4] The initial credit for interface */
2479#define QM_REG_CMINITCRD_0 0x1680cc
2480#define QM_REG_CMINITCRD_1 0x1680d0
2481#define QM_REG_CMINITCRD_2 0x1680d4
2482#define QM_REG_CMINITCRD_3 0x1680d8
2483#define QM_REG_CMINITCRD_4 0x1680dc
2484#define QM_REG_CMINITCRD_5 0x1680e0
2485#define QM_REG_CMINITCRD_6 0x1680e4
2486#define QM_REG_CMINITCRD_7 0x1680e8
2487/* [RW 8] A mask bit per CM interface. If this bit is 0 then this interface
2488 is masked */
2489#define QM_REG_CMINTEN 0x1680ec
2490/* [RW 12] A bit vector which indicates which one of the queues are tied to
2491 interface 0 */
2492#define QM_REG_CMINTVOQMASK_0 0x1681f4
2493#define QM_REG_CMINTVOQMASK_1 0x1681f8
2494#define QM_REG_CMINTVOQMASK_2 0x1681fc
2495#define QM_REG_CMINTVOQMASK_3 0x168200
2496#define QM_REG_CMINTVOQMASK_4 0x168204
2497#define QM_REG_CMINTVOQMASK_5 0x168208
2498#define QM_REG_CMINTVOQMASK_6 0x16820c
2499#define QM_REG_CMINTVOQMASK_7 0x168210
2500/* [RW 20] The number of connections divided by 16 which dictates the size
2501   of each queue that belongs to an even function number. */
2502#define QM_REG_CONNNUM_0 0x168020
2503/* [R 6] Keep the fill level of the fifo from write client 4 */
2504#define QM_REG_CQM_WRC_FIFOLVL 0x168018
2505/* [RW 8] The context regions sent in the CFC load request */
2506#define QM_REG_CTXREG_0 0x168030
2507#define QM_REG_CTXREG_1 0x168034
2508#define QM_REG_CTXREG_2 0x168038
2509#define QM_REG_CTXREG_3 0x16803c
2510/* [RW 12] The VOQ mask used to select the VOQs which need to be full for
2511 bypass enable */
2512#define QM_REG_ENBYPVOQMASK 0x16823c
2513/* [RW 32] A bit mask per each physical queue. If a bit is set then the
2514 physical queue uses the byte credit; queues 31-0 */
2515#define QM_REG_ENBYTECRD_LSB 0x168220
2516/* [RW 32] A bit mask per each physical queue. If a bit is set then the
2517 physical queue uses the byte credit; queues 95-64 */
2518#define QM_REG_ENBYTECRD_LSB_EXT_A 0x16e518
2519/* [RW 32] A bit mask per each physical queue. If a bit is set then the
2520 physical queue uses the byte credit; queues 63-32 */
2521#define QM_REG_ENBYTECRD_MSB 0x16821c
2522/* [RW 32] A bit mask per each physical queue. If a bit is set then the
2523 physical queue uses the byte credit; queues 127-96 */
2524#define QM_REG_ENBYTECRD_MSB_EXT_A 0x16e514
2525/* [RW 4] If cleared then the secondary interface will not be served by the
2526 RR arbiter */
2527#define QM_REG_ENSEC 0x1680f0
2528/* [RW 32] NA */
2529#define QM_REG_FUNCNUMSEL_LSB 0x168230
2530/* [RW 32] NA */
2531#define QM_REG_FUNCNUMSEL_MSB 0x16822c
2532/* [RW 32] A mask register to mask the Almost empty signals which will not
2533   be used for the almost empty indication to the HW block; queues 31:0 */
2534#define QM_REG_HWAEMPTYMASK_LSB 0x168218
2535/* [RW 32] A mask register to mask the Almost empty signals which will not
2536   be used for the almost empty indication to the HW block; queues 95-64 */
2537#define QM_REG_HWAEMPTYMASK_LSB_EXT_A 0x16e510
2538/* [RW 32] A mask register to mask the Almost empty signals which will not
2539   be used for the almost empty indication to the HW block; queues 63:32 */
2540#define QM_REG_HWAEMPTYMASK_MSB 0x168214
2541/* [RW 32] A mask register to mask the Almost empty signals which will not
2542   be used for the almost empty indication to the HW block; queues 127-96 */
2543#define QM_REG_HWAEMPTYMASK_MSB_EXT_A 0x16e50c
2544/* [RW 4] The number of outstanding request to CFC */
2545#define QM_REG_OUTLDREQ 0x168804
2546/* [RC 1] A flag to indicate that an overflow error occurred in one of the
2547 queues. */
2548#define QM_REG_OVFERROR 0x16805c
2549/* [RC 7] the Q where the overflow occurs */
2550#define QM_REG_OVFQNUM 0x168058
2551/* [R 16] Pause state for physical queues 15-0 */
2552#define QM_REG_PAUSESTATE0 0x168410
2553/* [R 16] Pause state for physical queues 31-16 */
2554#define QM_REG_PAUSESTATE1 0x168414
2555/* [R 16] Pause state for physical queues 47-32 */
2556#define QM_REG_PAUSESTATE2 0x16e684
2557/* [R 16] Pause state for physical queues 63-48 */
2558#define QM_REG_PAUSESTATE3 0x16e688
2559/* [R 16] Pause state for physical queues 79-64 */
2560#define QM_REG_PAUSESTATE4 0x16e68c
2561/* [R 16] Pause state for physical queues 95-80 */
2562#define QM_REG_PAUSESTATE5 0x16e690
2563/* [R 16] Pause state for physical queues 111-96 */
2564#define QM_REG_PAUSESTATE6 0x16e694
2565/* [R 16] Pause state for physical queues 127-112 */
2566#define QM_REG_PAUSESTATE7 0x16e698
2567/* [RW 2] The PCI attributes field used in the PCI request. */
2568#define QM_REG_PCIREQAT 0x168054
2569/* [R 16] The byte credit of port 0 */
2570#define QM_REG_PORT0BYTECRD 0x168300
2571/* [R 16] The byte credit of port 1 */
2572#define QM_REG_PORT1BYTECRD 0x168304
2573/* [RW 3] pci function number of queues 15-0 */
2574#define QM_REG_PQ2PCIFUNC_0 0x16e6bc
2575#define QM_REG_PQ2PCIFUNC_1 0x16e6c0
2576#define QM_REG_PQ2PCIFUNC_2 0x16e6c4
2577#define QM_REG_PQ2PCIFUNC_3 0x16e6c8
2578#define QM_REG_PQ2PCIFUNC_4 0x16e6cc
2579#define QM_REG_PQ2PCIFUNC_5 0x16e6d0
2580#define QM_REG_PQ2PCIFUNC_6 0x16e6d4
2581#define QM_REG_PQ2PCIFUNC_7 0x16e6d8
2582/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follows:
2583 ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
2584 bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
2585#define QM_REG_PTRTBL 0x168a00
2586/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as follows:
2587 ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
2588 bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
2589#define QM_REG_PTRTBL_EXT_A 0x16e200
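/* A sketch of unpacking the 54-bit pointer-table layout spelled out above,
 * operating on a raw value already fetched from the wide-bus memory (how that
 * fetch is performed is outside the scope of this header); the struct and
 * helper names are illustrative.
 */
struct qm_ptrtbl_entry {
	u32 rd_ptr;		/* ptrtbl[53:30] read pointer */
	u32 wr_ptr;		/* ptrtbl[29:6] write pointer */
	u8 rd_bank0;		/* ptrtbl[5:4] read bank0 */
	u8 rd_bank1;		/* ptrtbl[3:2] read bank1 */
	u8 wr_bank;		/* ptrtbl[1:0] write bank */
};

static inline void qm_decode_ptrtbl(u64 raw, struct qm_ptrtbl_entry *e)
{
	e->rd_ptr   = (raw >> 30) & 0xffffff;
	e->wr_ptr   = (raw >> 6) & 0xffffff;
	e->rd_bank0 = (raw >> 4) & 0x3;
	e->rd_bank1 = (raw >> 2) & 0x3;
	e->wr_bank  = raw & 0x3;
}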
2590/* [RW 2] Interrupt mask register #0 read/write */
2591#define QM_REG_QM_INT_MASK 0x168444
2592/* [R 2] Interrupt register #0 read */
2593#define QM_REG_QM_INT_STS 0x168438
2594/* [RW 12] Parity mask register #0 read/write */
2595#define QM_REG_QM_PRTY_MASK 0x168454
2596/* [R 12] Parity register #0 read */
2597#define QM_REG_QM_PRTY_STS 0x168448
2598/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
2599#define QM_REG_QSTATUS_HIGH 0x16802c
2600/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
2601#define QM_REG_QSTATUS_HIGH_EXT_A 0x16e408
2602/* [R 32] Current queues in pipeline: Queues from 0 to 31 */
2603#define QM_REG_QSTATUS_LOW 0x168028
2604/* [R 32] Current queues in pipeline: Queues from 64 to 95 */
2605#define QM_REG_QSTATUS_LOW_EXT_A 0x16e404
2606/* [R 24] The number of tasks queued for each queue; queues 63-0 */
2607#define QM_REG_QTASKCTR_0 0x168308
2608/* [R 24] The number of tasks queued for each queue; queues 127-64 */
2609#define QM_REG_QTASKCTR_EXT_A_0 0x16e584
2610/* [RW 4] Queue tied to VOQ */
2611#define QM_REG_QVOQIDX_0 0x1680f4
2612#define QM_REG_QVOQIDX_10 0x16811c
2613#define QM_REG_QVOQIDX_100 0x16e49c
2614#define QM_REG_QVOQIDX_101 0x16e4a0
2615#define QM_REG_QVOQIDX_102 0x16e4a4
2616#define QM_REG_QVOQIDX_103 0x16e4a8
2617#define QM_REG_QVOQIDX_104 0x16e4ac
2618#define QM_REG_QVOQIDX_105 0x16e4b0
2619#define QM_REG_QVOQIDX_106 0x16e4b4
2620#define QM_REG_QVOQIDX_107 0x16e4b8
2621#define QM_REG_QVOQIDX_108 0x16e4bc
2622#define QM_REG_QVOQIDX_109 0x16e4c0
2623#define QM_REG_QVOQIDX_11 0x168120
2624#define QM_REG_QVOQIDX_110 0x16e4c4
2625#define QM_REG_QVOQIDX_111 0x16e4c8
2626#define QM_REG_QVOQIDX_112 0x16e4cc
2627#define QM_REG_QVOQIDX_113 0x16e4d0
2628#define QM_REG_QVOQIDX_114 0x16e4d4
2629#define QM_REG_QVOQIDX_115 0x16e4d8
2630#define QM_REG_QVOQIDX_116 0x16e4dc
2631#define QM_REG_QVOQIDX_117 0x16e4e0
2632#define QM_REG_QVOQIDX_118 0x16e4e4
2633#define QM_REG_QVOQIDX_119 0x16e4e8
2634#define QM_REG_QVOQIDX_12 0x168124
2635#define QM_REG_QVOQIDX_120 0x16e4ec
2636#define QM_REG_QVOQIDX_121 0x16e4f0
2637#define QM_REG_QVOQIDX_122 0x16e4f4
2638#define QM_REG_QVOQIDX_123 0x16e4f8
2639#define QM_REG_QVOQIDX_124 0x16e4fc
2640#define QM_REG_QVOQIDX_125 0x16e500
2641#define QM_REG_QVOQIDX_126 0x16e504
2642#define QM_REG_QVOQIDX_127 0x16e508
2643#define QM_REG_QVOQIDX_13 0x168128
2644#define QM_REG_QVOQIDX_14 0x16812c
2645#define QM_REG_QVOQIDX_15 0x168130
2646#define QM_REG_QVOQIDX_16 0x168134
2647#define QM_REG_QVOQIDX_17 0x168138
2648#define QM_REG_QVOQIDX_21 0x168148
2649#define QM_REG_QVOQIDX_22 0x16814c
2650#define QM_REG_QVOQIDX_23 0x168150
2651#define QM_REG_QVOQIDX_24 0x168154
2652#define QM_REG_QVOQIDX_25 0x168158
2653#define QM_REG_QVOQIDX_26 0x16815c
2654#define QM_REG_QVOQIDX_27 0x168160
2655#define QM_REG_QVOQIDX_28 0x168164
2656#define QM_REG_QVOQIDX_29 0x168168
2657#define QM_REG_QVOQIDX_30 0x16816c
2658#define QM_REG_QVOQIDX_31 0x168170
2659#define QM_REG_QVOQIDX_32 0x168174
2660#define QM_REG_QVOQIDX_33 0x168178
2661#define QM_REG_QVOQIDX_34 0x16817c
2662#define QM_REG_QVOQIDX_35 0x168180
2663#define QM_REG_QVOQIDX_36 0x168184
2664#define QM_REG_QVOQIDX_37 0x168188
2665#define QM_REG_QVOQIDX_38 0x16818c
2666#define QM_REG_QVOQIDX_39 0x168190
2667#define QM_REG_QVOQIDX_40 0x168194
2668#define QM_REG_QVOQIDX_41 0x168198
2669#define QM_REG_QVOQIDX_42 0x16819c
2670#define QM_REG_QVOQIDX_43 0x1681a0
2671#define QM_REG_QVOQIDX_44 0x1681a4
2672#define QM_REG_QVOQIDX_45 0x1681a8
2673#define QM_REG_QVOQIDX_46 0x1681ac
2674#define QM_REG_QVOQIDX_47 0x1681b0
2675#define QM_REG_QVOQIDX_48 0x1681b4
2676#define QM_REG_QVOQIDX_49 0x1681b8
2677#define QM_REG_QVOQIDX_5 0x168108
2678#define QM_REG_QVOQIDX_50 0x1681bc
2679#define QM_REG_QVOQIDX_51 0x1681c0
2680#define QM_REG_QVOQIDX_52 0x1681c4
2681#define QM_REG_QVOQIDX_53 0x1681c8
2682#define QM_REG_QVOQIDX_54 0x1681cc
2683#define QM_REG_QVOQIDX_55 0x1681d0
2684#define QM_REG_QVOQIDX_56 0x1681d4
2685#define QM_REG_QVOQIDX_57 0x1681d8
2686#define QM_REG_QVOQIDX_58 0x1681dc
2687#define QM_REG_QVOQIDX_59 0x1681e0
2688#define QM_REG_QVOQIDX_6 0x16810c
2689#define QM_REG_QVOQIDX_60 0x1681e4
2690#define QM_REG_QVOQIDX_61 0x1681e8
2691#define QM_REG_QVOQIDX_62 0x1681ec
2692#define QM_REG_QVOQIDX_63 0x1681f0
2693#define QM_REG_QVOQIDX_64 0x16e40c
2694#define QM_REG_QVOQIDX_65 0x16e410
2695#define QM_REG_QVOQIDX_69 0x16e420
2696#define QM_REG_QVOQIDX_7 0x168110
2697#define QM_REG_QVOQIDX_70 0x16e424
2698#define QM_REG_QVOQIDX_71 0x16e428
2699#define QM_REG_QVOQIDX_72 0x16e42c
2700#define QM_REG_QVOQIDX_73 0x16e430
2701#define QM_REG_QVOQIDX_74 0x16e434
2702#define QM_REG_QVOQIDX_75 0x16e438
2703#define QM_REG_QVOQIDX_76 0x16e43c
2704#define QM_REG_QVOQIDX_77 0x16e440
2705#define QM_REG_QVOQIDX_78 0x16e444
2706#define QM_REG_QVOQIDX_79 0x16e448
2707#define QM_REG_QVOQIDX_8 0x168114
2708#define QM_REG_QVOQIDX_80 0x16e44c
2709#define QM_REG_QVOQIDX_81 0x16e450
2710#define QM_REG_QVOQIDX_85 0x16e460
2711#define QM_REG_QVOQIDX_86 0x16e464
2712#define QM_REG_QVOQIDX_87 0x16e468
2713#define QM_REG_QVOQIDX_88 0x16e46c
2714#define QM_REG_QVOQIDX_89 0x16e470
2715#define QM_REG_QVOQIDX_9 0x168118
2716#define QM_REG_QVOQIDX_90 0x16e474
2717#define QM_REG_QVOQIDX_91 0x16e478
2718#define QM_REG_QVOQIDX_92 0x16e47c
2719#define QM_REG_QVOQIDX_93 0x16e480
2720#define QM_REG_QVOQIDX_94 0x16e484
2721#define QM_REG_QVOQIDX_95 0x16e488
2722#define QM_REG_QVOQIDX_96 0x16e48c
2723#define QM_REG_QVOQIDX_97 0x16e490
2724#define QM_REG_QVOQIDX_98 0x16e494
2725#define QM_REG_QVOQIDX_99 0x16e498
2726/* [RW 1] Initialization bit command */
2727#define QM_REG_SOFT_RESET 0x168428
2728/* [RW 8] The credit cost of each task in the QM. A value per VOQ */
2729#define QM_REG_TASKCRDCOST_0 0x16809c
2730#define QM_REG_TASKCRDCOST_1 0x1680a0
2731#define QM_REG_TASKCRDCOST_2 0x1680a4
2732#define QM_REG_TASKCRDCOST_4 0x1680ac
2733#define QM_REG_TASKCRDCOST_5 0x1680b0
2734/* [R 6] Keep the fill level of the fifo from write client 3 */
2735#define QM_REG_TQM_WRC_FIFOLVL 0x168010
2736/* [R 6] Keep the fill level of the fifo from write client 2 */
2737#define QM_REG_UQM_WRC_FIFOLVL 0x168008
2738/* [RC 32] Credit update error register */
2739#define QM_REG_VOQCRDERRREG 0x168408
2740/* [R 16] The credit value for each VOQ */
2741#define QM_REG_VOQCREDIT_0 0x1682d0
2742#define QM_REG_VOQCREDIT_1 0x1682d4
2743#define QM_REG_VOQCREDIT_4 0x1682e0
2744/* [RW 16] The credit value above which the QM is considered almost full */
2745#define QM_REG_VOQCREDITAFULLTHR 0x168090
2746/* [RW 16] The init and maximum credit for each VoQ */
2747#define QM_REG_VOQINITCREDIT_0 0x168060
2748#define QM_REG_VOQINITCREDIT_1 0x168064
2749#define QM_REG_VOQINITCREDIT_2 0x168068
2750#define QM_REG_VOQINITCREDIT_4 0x168070
2751#define QM_REG_VOQINITCREDIT_5 0x168074
2752/* [RW 1] The port to which the VOQ belongs */
2753#define QM_REG_VOQPORT_0 0x1682a0
2754#define QM_REG_VOQPORT_1 0x1682a4
2755#define QM_REG_VOQPORT_2 0x1682a8
2756/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2757#define QM_REG_VOQQMASK_0_LSB 0x168240
2758/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2759#define QM_REG_VOQQMASK_0_LSB_EXT_A 0x16e524
2760/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2761#define QM_REG_VOQQMASK_0_MSB 0x168244
2762/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2763#define QM_REG_VOQQMASK_0_MSB_EXT_A 0x16e528
2764/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2765#define QM_REG_VOQQMASK_10_LSB 0x168290
2766/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2767#define QM_REG_VOQQMASK_10_LSB_EXT_A 0x16e574
2768/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2769#define QM_REG_VOQQMASK_10_MSB 0x168294
2770/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2771#define QM_REG_VOQQMASK_10_MSB_EXT_A 0x16e578
2772/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2773#define QM_REG_VOQQMASK_11_LSB 0x168298
2774/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2775#define QM_REG_VOQQMASK_11_LSB_EXT_A 0x16e57c
2776/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2777#define QM_REG_VOQQMASK_11_MSB 0x16829c
2778/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2779#define QM_REG_VOQQMASK_11_MSB_EXT_A 0x16e580
2780/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2781#define QM_REG_VOQQMASK_1_LSB 0x168248
2782/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2783#define QM_REG_VOQQMASK_1_LSB_EXT_A 0x16e52c
2784/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2785#define QM_REG_VOQQMASK_1_MSB 0x16824c
2786/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2787#define QM_REG_VOQQMASK_1_MSB_EXT_A 0x16e530
2788/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2789#define QM_REG_VOQQMASK_2_LSB 0x168250
2790/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2791#define QM_REG_VOQQMASK_2_LSB_EXT_A 0x16e534
2792/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2793#define QM_REG_VOQQMASK_2_MSB 0x168254
2794/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2795#define QM_REG_VOQQMASK_2_MSB_EXT_A 0x16e538
2796/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2797#define QM_REG_VOQQMASK_3_LSB 0x168258
2798/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2799#define QM_REG_VOQQMASK_3_LSB_EXT_A 0x16e53c
2800/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2801#define QM_REG_VOQQMASK_3_MSB_EXT_A 0x16e540
2802/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2803#define QM_REG_VOQQMASK_4_LSB 0x168260
2804/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2805#define QM_REG_VOQQMASK_4_LSB_EXT_A 0x16e544
2806/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2807#define QM_REG_VOQQMASK_4_MSB 0x168264
2808/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2809#define QM_REG_VOQQMASK_4_MSB_EXT_A 0x16e548
2810/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2811#define QM_REG_VOQQMASK_5_LSB 0x168268
2812/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2813#define QM_REG_VOQQMASK_5_LSB_EXT_A 0x16e54c
2814/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2815#define QM_REG_VOQQMASK_5_MSB 0x16826c
2816/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2817#define QM_REG_VOQQMASK_5_MSB_EXT_A 0x16e550
2818/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2819#define QM_REG_VOQQMASK_6_LSB 0x168270
2820/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2821#define QM_REG_VOQQMASK_6_LSB_EXT_A 0x16e554
2822/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2823#define QM_REG_VOQQMASK_6_MSB 0x168274
2824/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2825#define QM_REG_VOQQMASK_6_MSB_EXT_A 0x16e558
2826/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2827#define QM_REG_VOQQMASK_7_LSB 0x168278
2828/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2829#define QM_REG_VOQQMASK_7_LSB_EXT_A 0x16e55c
2830/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2831#define QM_REG_VOQQMASK_7_MSB 0x16827c
2832/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2833#define QM_REG_VOQQMASK_7_MSB_EXT_A 0x16e560
2834/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2835#define QM_REG_VOQQMASK_8_LSB 0x168280
2836/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2837#define QM_REG_VOQQMASK_8_LSB_EXT_A 0x16e564
2838/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
2839#define QM_REG_VOQQMASK_8_MSB 0x168284
2840/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2841#define QM_REG_VOQQMASK_8_MSB_EXT_A 0x16e568
2842/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
2843#define QM_REG_VOQQMASK_9_LSB 0x168288
2844/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
2845#define QM_REG_VOQQMASK_9_LSB_EXT_A 0x16e56c
2846/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
2847#define QM_REG_VOQQMASK_9_MSB_EXT_A 0x16e570
2848/* [RW 32] WRR weights */
2849#define QM_REG_WRRWEIGHTS_0 0x16880c
2850#define QM_REG_WRRWEIGHTS_1 0x168810
2851#define QM_REG_WRRWEIGHTS_10 0x168814
2852#define QM_REG_WRRWEIGHTS_11 0x168818
2853#define QM_REG_WRRWEIGHTS_12 0x16881c
2854#define QM_REG_WRRWEIGHTS_13 0x168820
2855#define QM_REG_WRRWEIGHTS_14 0x168824
2856#define QM_REG_WRRWEIGHTS_15 0x168828
2857#define QM_REG_WRRWEIGHTS_16 0x16e000
2858#define QM_REG_WRRWEIGHTS_17 0x16e004
2859#define QM_REG_WRRWEIGHTS_18 0x16e008
2860#define QM_REG_WRRWEIGHTS_19 0x16e00c
2861#define QM_REG_WRRWEIGHTS_2 0x16882c
2862#define QM_REG_WRRWEIGHTS_20 0x16e010
2863#define QM_REG_WRRWEIGHTS_21 0x16e014
2864#define QM_REG_WRRWEIGHTS_22 0x16e018
2865#define QM_REG_WRRWEIGHTS_23 0x16e01c
2866#define QM_REG_WRRWEIGHTS_24 0x16e020
2867#define QM_REG_WRRWEIGHTS_25 0x16e024
2868#define QM_REG_WRRWEIGHTS_26 0x16e028
2869#define QM_REG_WRRWEIGHTS_27 0x16e02c
2870#define QM_REG_WRRWEIGHTS_28 0x16e030
2871#define QM_REG_WRRWEIGHTS_29 0x16e034
2872#define QM_REG_WRRWEIGHTS_3 0x168830
2873#define QM_REG_WRRWEIGHTS_30 0x16e038
2874#define QM_REG_WRRWEIGHTS_31 0x16e03c
2875#define QM_REG_WRRWEIGHTS_4 0x168834
2876#define QM_REG_WRRWEIGHTS_5 0x168838
2877#define QM_REG_WRRWEIGHTS_6 0x16883c
2878#define QM_REG_WRRWEIGHTS_7 0x168840
2879#define QM_REG_WRRWEIGHTS_8 0x168844
2880#define QM_REG_WRRWEIGHTS_9 0x168848
2881/* [R 6] Keep the fill level of the fifo from write client 1 */
2882#define QM_REG_XQM_WRC_FIFOLVL 0x168000
2883#define SRC_REG_COUNTFREE0 0x40500
2884/* [RW 1] If clear the searcher is compatible with E1 A0 - supporting only
2885   two ports. If set the searcher supports 8 functions. */
2886#define SRC_REG_E1HMF_ENABLE 0x404cc
2887#define SRC_REG_FIRSTFREE0 0x40510
2888#define SRC_REG_KEYRSS0_0 0x40408
2889#define SRC_REG_KEYRSS0_7 0x40424
2890#define SRC_REG_KEYRSS1_9 0x40454
2891#define SRC_REG_KEYSEARCH_0 0x40458
2892#define SRC_REG_KEYSEARCH_1 0x4045c
2893#define SRC_REG_KEYSEARCH_2 0x40460
2894#define SRC_REG_KEYSEARCH_3 0x40464
2895#define SRC_REG_KEYSEARCH_4 0x40468
2896#define SRC_REG_KEYSEARCH_5 0x4046c
2897#define SRC_REG_KEYSEARCH_6 0x40470
2898#define SRC_REG_KEYSEARCH_7 0x40474
2899#define SRC_REG_KEYSEARCH_8 0x40478
2900#define SRC_REG_KEYSEARCH_9 0x4047c
2901#define SRC_REG_LASTFREE0 0x40530
2902#define SRC_REG_NUMBER_HASH_BITS0 0x40400
2903/* [RW 1] Reset internal state machines. */
2904#define SRC_REG_SOFT_RST 0x4049c
2905/* [R 3] Interrupt register #0 read */
2906#define SRC_REG_SRC_INT_STS 0x404ac
2907/* [RW 3] Parity mask register #0 read/write */
2908#define SRC_REG_SRC_PRTY_MASK 0x404c8
2909/* [R 3] Parity register #0 read */
2910#define SRC_REG_SRC_PRTY_STS 0x404bc
2911/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
2912#define TCM_REG_CAM_OCCUP 0x5017c
2913/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
2914 disregarded; valid output is deasserted; all other signals are treated as
2915 usual; if 1 - normal activity. */
2916#define TCM_REG_CDU_AG_RD_IFEN 0x50034
2917/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
2918 are disregarded; all other signals are treated as usual; if 1 - normal
2919 activity. */
2920#define TCM_REG_CDU_AG_WR_IFEN 0x50030
2921/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
2922 disregarded; valid output is deasserted; all other signals are treated as
2923 usual; if 1 - normal activity. */
2924#define TCM_REG_CDU_SM_RD_IFEN 0x5003c
2925/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
2926   inputs are disregarded; all other signals are treated as usual; if 1 -
2927 normal activity. */
2928#define TCM_REG_CDU_SM_WR_IFEN 0x50038
2929/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
2930 the initial credit value; read returns the current value of the credit
2931 counter. Must be initialized to 1 at start-up. */
2932#define TCM_REG_CFC_INIT_CRD 0x50204
2933/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
2934   weight 8 (the most prioritised); 1 stands for weight 1 (least
2935   prioritised); 2 stands for weight 2; etc. */
2936#define TCM_REG_CP_WEIGHT 0x500c0
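/* All of the *_WEIGHT registers in this block use the same 3-bit encoding
 * described above: 0 encodes weight 8 (the highest), otherwise the value is
 * the weight itself. A small sketch of that mapping; the helper name is
 * illustrative.
 */
static inline u8 tcm_wrr_weight_to_reg(u8 weight)
{
	return (weight >= 8) ? 0 : weight;	/* 8 -> 0, 1..7 -> themselves */
}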
2937/* [RW 1] Input csem Interface enable. If 0 - the valid input is
2938 disregarded; acknowledge output is deasserted; all other signals are
2939 treated as usual; if 1 - normal activity. */
2940#define TCM_REG_CSEM_IFEN 0x5002c
2941/* [RC 1] Message length mismatch (relative to last indication) at the In#9
2942 interface. */
2943#define TCM_REG_CSEM_LENGTH_MIS 0x50174
2944/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
2945   weight 8 (the most prioritised); 1 stands for weight 1 (least
2946   prioritised); 2 stands for weight 2; etc. */
2947#define TCM_REG_CSEM_WEIGHT 0x500bc
2948/* [RW 8] The Event ID in case of ErrorFlg is set in the input message. */
2949#define TCM_REG_ERR_EVNT_ID 0x500a0
2950/* [RW 28] The CM erroneous header for QM and Timers formatting. */
2951#define TCM_REG_ERR_TCM_HDR 0x5009c
2952/* [RW 8] The Event ID for Timers expiration. */
2953#define TCM_REG_EXPR_EVNT_ID 0x500a4
2954/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
2955 writes the initial credit value; read returns the current value of the
2956 credit counter. Must be initialized to 64 at start-up. */
2957#define TCM_REG_FIC0_INIT_CRD 0x5020c
2958/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
2959 writes the initial credit value; read returns the current value of the
2960 credit counter. Must be initialized to 64 at start-up. */
2961#define TCM_REG_FIC1_INIT_CRD 0x50210
2962/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
2963 - strict priority defined by ~tcm_registers_gr_ag_pr.gr_ag_pr;
2964 ~tcm_registers_gr_ld0_pr.gr_ld0_pr and
2965 ~tcm_registers_gr_ld1_pr.gr_ld1_pr. */
2966#define TCM_REG_GR_ARB_TYPE 0x50114
2967/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
2968 highest priority is 3. It is supposed that the Store channel is the
2969   complement of the other 3 groups. */
2970#define TCM_REG_GR_LD0_PR 0x5011c
2971/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
2972 highest priority is 3. It is supposed that the Store channel is the
2973   complement of the other 3 groups. */
2974#define TCM_REG_GR_LD1_PR 0x50120
2975/* [RW 4] The number of double REG-pairs; loaded from the STORM context and
2976 sent to STORM; for a specific connection type. The double REG-pairs are
2977 used to align to STORM context row size of 128 bits. The offset of these
2978 data in the STORM context is always 0. Index _i stands for the connection
2979 type (one of 16). */
2980#define TCM_REG_N_SM_CTX_LD_0 0x50050
2981#define TCM_REG_N_SM_CTX_LD_1 0x50054
2982#define TCM_REG_N_SM_CTX_LD_2 0x50058
2983#define TCM_REG_N_SM_CTX_LD_3 0x5005c
2984#define TCM_REG_N_SM_CTX_LD_4 0x50060
2985#define TCM_REG_N_SM_CTX_LD_5 0x50064
2986/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
2987 acknowledge output is deasserted; all other signals are treated as usual;
2988 if 1 - normal activity. */
2989#define TCM_REG_PBF_IFEN 0x50024
2990/* [RC 1] Message length mismatch (relative to last indication) at the In#7
2991 interface. */
2992#define TCM_REG_PBF_LENGTH_MIS 0x5016c
2993/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
2994 weight 8 (the most prioritised); 1 stands for weight 1 (least
2995 prioritised); 2 stands for weight 2; etc. */
2996#define TCM_REG_PBF_WEIGHT 0x500b4
2997#define TCM_REG_PHYS_QNUM0_0 0x500e0
2998#define TCM_REG_PHYS_QNUM0_1 0x500e4
2999#define TCM_REG_PHYS_QNUM1_0 0x500e8
3000#define TCM_REG_PHYS_QNUM1_1 0x500ec
3001#define TCM_REG_PHYS_QNUM2_0 0x500f0
3002#define TCM_REG_PHYS_QNUM2_1 0x500f4
3003#define TCM_REG_PHYS_QNUM3_0 0x500f8
3004#define TCM_REG_PHYS_QNUM3_1 0x500fc
3005/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded;
3006 acknowledge output is deasserted; all other signals are treated as usual;
3007 if 1 - normal activity. */
3008#define TCM_REG_PRS_IFEN 0x50020
3009/* [RC 1] Message length mismatch (relative to last indication) at the In#6
3010 interface. */
3011#define TCM_REG_PRS_LENGTH_MIS 0x50168
3012/* [RW 3] The weight of the input prs in the WRR mechanism. 0 stands for
3013 weight 8 (the most prioritised); 1 stands for weight 1 (least
3014 prioritised); 2 stands for weight 2; etc. */
3015#define TCM_REG_PRS_WEIGHT 0x500b0
3016/* [RW 8] The Event ID for Timers formatting in case of stop done. */
3017#define TCM_REG_STOP_EVNT_ID 0x500a8
3018/* [RC 1] Message length mismatch (relative to last indication) at the STORM
3019 interface. */
3020#define TCM_REG_STORM_LENGTH_MIS 0x50160
3021/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
3022 disregarded; acknowledge output is deasserted; all other signals are
3023 treated as usual; if 1 - normal activity. */
3024#define TCM_REG_STORM_TCM_IFEN 0x50010
3025/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
3026 weight 8 (the most prioritised); 1 stands for weight 1 (least
3027 prioritised); 2 stands for weight 2; etc. */
3028#define TCM_REG_STORM_WEIGHT 0x500ac
3029/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
3030 acknowledge output is deasserted; all other signals are treated as usual;
3031 if 1 - normal activity. */
3032#define TCM_REG_TCM_CFC_IFEN 0x50040
3033/* [RW 11] Interrupt mask register #0 read/write */
3034#define TCM_REG_TCM_INT_MASK 0x501dc
3035/* [R 11] Interrupt register #0 read */
3036#define TCM_REG_TCM_INT_STS 0x501d0
3037/* [R 27] Parity register #0 read */
3038#define TCM_REG_TCM_PRTY_STS 0x501e0
3039/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
3040 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
3041 Is used to determine the number of the AG context REG-pairs written back;
3042 when the input message Reg1WbFlg isn't set. */
3043#define TCM_REG_TCM_REG0_SZ 0x500d8
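
As the comment spells out, this field holds the most-significant REG-pair
index rather than a count, i.e. "number of REG-pairs minus one". A one-line
sketch of that encoding (the helper name is ours):

/* AG context region 0 size encoding: a region of n REG-pairs is programmed
 * as n - 1, so 6 REG-pairs -> 5. */
unsigned int ag_region0_sz_encode(unsigned int nr_reg_pairs)
{
	return nr_reg_pairs - 1;
}
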
3044/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
3045 disregarded; valid is deasserted; all other signals are treated as usual;
3046 if 1 - normal activity. */
3047#define TCM_REG_TCM_STORM0_IFEN 0x50004
3048/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
3049 disregarded; valid is deasserted; all other signals are treated as usual;
3050 if 1 - normal activity. */
3051#define TCM_REG_TCM_STORM1_IFEN 0x50008
3052/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
3053 disregarded; valid is deasserted; all other signals are treated as usual;
3054 if 1 - normal activity. */
3055#define TCM_REG_TCM_TQM_IFEN 0x5000c
3056/* [RW 1] If set; the Q index received from the QM is inserted into the event ID. */
3057#define TCM_REG_TCM_TQM_USE_Q 0x500d4
3058/* [RW 28] The CM header for Timers expiration command. */
3059#define TCM_REG_TM_TCM_HDR 0x50098
3060/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
3061 disregarded; acknowledge output is deasserted; all other signals are
3062 treated as usual; if 1 - normal activity. */
3063#define TCM_REG_TM_TCM_IFEN 0x5001c
3064/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
3065 weight 8 (the most prioritised); 1 stands for weight 1 (least
3066 prioritised); 2 stands for weight 2; etc. */
3067#define TCM_REG_TM_WEIGHT 0x500d0
3068/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
3069 the initial credit value; read returns the current value of the credit
3070 counter. Must be initialized to 32 at start-up. */
3071#define TCM_REG_TQM_INIT_CRD 0x5021c
3072/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
3073 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
3074 prioritised); 2 stands for weight 2; etc. */
3075#define TCM_REG_TQM_P_WEIGHT 0x500c8
3076/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
3077 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
3078 prioritised); 2 stands for weight 2; etc. */
3079#define TCM_REG_TQM_S_WEIGHT 0x500cc
3080/* [RW 28] The CM header value for QM request (primary). */
3081#define TCM_REG_TQM_TCM_HDR_P 0x50090
3082/* [RW 28] The CM header value for QM request (secondary). */
3083#define TCM_REG_TQM_TCM_HDR_S 0x50094
3084/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
3085 acknowledge output is deasserted; all other signals are treated as usual;
3086 if 1 - normal activity. */
3087#define TCM_REG_TQM_TCM_IFEN 0x50014
3088/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
3089 acknowledge output is deasserted; all other signals are treated as usual;
3090 if 1 - normal activity. */
3091#define TCM_REG_TSDM_IFEN 0x50018
3092/* [RC 1] Message length mismatch (relative to last indication) at the SDM
3093 interface. */
3094#define TCM_REG_TSDM_LENGTH_MIS 0x50164
3095/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
3096 weight 8 (the most prioritised); 1 stands for weight 1 (least
3097 prioritised); 2 stands for weight 2; etc. */
3098#define TCM_REG_TSDM_WEIGHT 0x500c4
3099/* [RW 1] Input usem Interface enable. If 0 - the valid input is
3100 disregarded; acknowledge output is deasserted; all other signals are
3101 treated as usual; if 1 - normal activity. */
3102#define TCM_REG_USEM_IFEN 0x50028
3103/* [RC 1] Message length mismatch (relative to last indication) at the In#8
3104 interface. */
3105#define TCM_REG_USEM_LENGTH_MIS 0x50170
3106/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
3107 weight 8 (the most prioritised); 1 stands for weight 1 (least
3108 prioritised); 2 stands for weight 2; etc. */
3109#define TCM_REG_USEM_WEIGHT 0x500b8
3110/* [RW 21] Indirect access to the descriptor table of the XX protection
3111 mechanism. The fields are: [5:0] - length of the message; [15:6] - message
3112 pointer; [20:16] - next pointer. */
3113#define TCM_REG_XX_DESCR_TABLE 0x50280
3114#define TCM_REG_XX_DESCR_TABLE_SIZE 32
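
The 21-bit descriptor layout just described ([5:0] message length, [15:6]
message pointer, [20:16] next pointer) unpacks with plain shifts and masks;
a small sketch under that layout, with struct and field names of our own
choosing:

#include <stdint.h>

struct tcm_xx_descr {
	uint32_t msg_len;	/* bits [5:0]   */
	uint32_t msg_ptr;	/* bits [15:6]  */
	uint32_t next_ptr;	/* bits [20:16] */
};

/* Unpack one TCM XX protection descriptor word read via the indirect
 * TCM_REG_XX_DESCR_TABLE access. */
struct tcm_xx_descr tcm_xx_descr_decode(uint32_t raw)
{
	struct tcm_xx_descr d;

	d.msg_len  = raw & 0x3f;		/* [5:0]   */
	d.msg_ptr  = (raw >> 6) & 0x3ff;	/* [15:6]  */
	d.next_ptr = (raw >> 16) & 0x1f;	/* [20:16] */
	return d;
}
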
3115/* [R 6] Used to read the value of the XX protection Free counter. */
3116#define TCM_REG_XX_FREE 0x50178
3117/* [RW 6] Initial value for the credit counter; responsible for fulfilling
3118 of the Input Stage XX protection buffer by the XX protection pending
3119 messages. Max credit available - 127. Write writes the initial credit
3120 value; read returns the current value of the credit counter. Must be
3121 initialized to 19 at start-up. */
3122#define TCM_REG_XX_INIT_CRD 0x50220
3123/* [RW 6] Maximum link list size (messages locked) per connection in the XX
3124 protection. */
3125#define TCM_REG_XX_MAX_LL_SZ 0x50044
3126/* [RW 6] The maximum number of pending messages; which may be stored in XX
3127 protection. ~tcm_registers_xx_free.xx_free is read on read. */
3128#define TCM_REG_XX_MSG_NUM 0x50224
3129/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
3130#define TCM_REG_XX_OVFL_EVNT_ID 0x50048
3131/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
3132 The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
3133 header pointer. */
3134#define TCM_REG_XX_TABLE 0x50240
3135/* [RW 4] Load value for cfc ac credit cnt. */
3136#define TM_REG_CFC_AC_CRDCNT_VAL 0x164208
3137/* [RW 4] Load value for cfc cld credit cnt. */
3138#define TM_REG_CFC_CLD_CRDCNT_VAL 0x164210
3139/* [RW 8] Client0 context region. */
3140#define TM_REG_CL0_CONT_REGION 0x164030
3141/* [RW 8] Client1 context region. */
3142#define TM_REG_CL1_CONT_REGION 0x164034
3143/* [RW 8] Client2 context region. */
3144#define TM_REG_CL2_CONT_REGION 0x164038
3145/* [RW 2] Client in High priority client number. */
3146#define TM_REG_CLIN_PRIOR0_CLIENT 0x164024
3147/* [RW 4] Load value for clout0 cred cnt. */
3148#define TM_REG_CLOUT_CRDCNT0_VAL 0x164220
3149/* [RW 4] Load value for clout1 cred cnt. */
3150#define TM_REG_CLOUT_CRDCNT1_VAL 0x164228
3151/* [RW 4] Load value for clout2 cred cnt. */
3152#define TM_REG_CLOUT_CRDCNT2_VAL 0x164230
3153/* [RW 1] Enable client0 input. */
3154#define TM_REG_EN_CL0_INPUT 0x164008
3155/* [RW 1] Enable client1 input. */
3156#define TM_REG_EN_CL1_INPUT 0x16400c
3157/* [RW 1] Enable client2 input. */
3158#define TM_REG_EN_CL2_INPUT 0x164010
3159#define TM_REG_EN_LINEAR0_TIMER 0x164014
3160/* [RW 1] Enable real time counter. */
3161#define TM_REG_EN_REAL_TIME_CNT 0x1640d8
3162/* [RW 1] Enable for Timers state machines. */
3163#define TM_REG_EN_TIMERS 0x164000
3164/* [RW 4] Load value for expiration credit cnt. CFC max number of
3165 outstanding load requests for timers (expiration) context loading. */
3166#define TM_REG_EXP_CRDCNT_VAL 0x164238
3167/* [RW 32] Linear0 logic address. */
3168#define TM_REG_LIN0_LOGIC_ADDR 0x164240
3169/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
3170#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048
3171/* [WB 64] Linear0 phy address. */
3172#define TM_REG_LIN0_PHY_ADDR 0x164270
3173/* [RW 1] Linear0 physical address valid. */
3174#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248
3175#define TM_REG_LIN0_SCAN_ON 0x1640d0
3176/* [RW 24] Linear0 array scan timeout. */
3177#define TM_REG_LIN0_SCAN_TIME 0x16403c
3178/* [RW 32] Linear1 logic address. */
3179#define TM_REG_LIN1_LOGIC_ADDR 0x164250
3180/* [WB 64] Linear1 phy address. */
3181#define TM_REG_LIN1_PHY_ADDR 0x164280
3182/* [RW 1] Linear1 physical address valid. */
3183#define TM_REG_LIN1_PHY_ADDR_VALID 0x164258
3184/* [RW 6] Linear timer set_clear fifo threshold. */
3185#define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR 0x164070
3186/* [RW 2] Load value for pci arbiter credit cnt. */
3187#define TM_REG_PCIARB_CRDCNT_VAL 0x164260
3188/* [RW 20] The amount of hardware cycles for each timer tick. */
3189#define TM_REG_TIMER_TICK_SIZE 0x16401c
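
Since TM_REG_TIMER_TICK_SIZE holds hardware cycles per timer tick, the value
to program follows from the core clock and the desired tick period. A sketch
of that arithmetic; the clock frequency is a caller-supplied stand-in, not
something this header specifies:

#include <stdint.h>

/* Cycles per tick = core clock (Hz) * tick period (us) / 1e6, clipped to
 * the 20-bit field width noted above. */
uint32_t tm_timer_tick_size(uint32_t core_clk_hz, uint32_t tick_us)
{
	uint64_t cycles = (uint64_t)core_clk_hz * tick_us / 1000000ull;

	return (uint32_t)(cycles & 0xfffff);
}
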
3190/* [RW 8] Timers Context region. */
3191#define TM_REG_TM_CONTEXT_REGION 0x164044
3192/* [RW 1] Interrupt mask register #0 read/write */
3193#define TM_REG_TM_INT_MASK 0x1640fc
3194/* [R 1] Interrupt register #0 read */
3195#define TM_REG_TM_INT_STS 0x1640f0
3196/* [RW 8] The event id for aggregated interrupt 0 */
3197#define TSDM_REG_AGG_INT_EVENT_0 0x42038
3198#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
3199#define TSDM_REG_AGG_INT_EVENT_2 0x42040
3200#define TSDM_REG_AGG_INT_EVENT_3 0x42044
3201#define TSDM_REG_AGG_INT_EVENT_4 0x42048
3202/* [RW 1] The T bit for aggregated interrupt 0 */
3203#define TSDM_REG_AGG_INT_T_0 0x420b8
3204#define TSDM_REG_AGG_INT_T_1 0x420bc
3205/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
3206#define TSDM_REG_CFC_RSP_START_ADDR 0x42008
3207/* [RW 16] The maximum value of the completion counter #0 */
3208#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c
3209/* [RW 16] The maximum value of the completion counter #1 */
3210#define TSDM_REG_CMP_COUNTER_MAX1 0x42020
3211/* [RW 16] The maximum value of the completion counter #2 */
3212#define TSDM_REG_CMP_COUNTER_MAX2 0x42024
3213/* [RW 16] The maximum value of the completion counter #3 */
3214#define TSDM_REG_CMP_COUNTER_MAX3 0x42028
3215/* [RW 13] The start address in the internal RAM for the completion
3216 counters. */
3217#define TSDM_REG_CMP_COUNTER_START_ADDR 0x4200c
3218#define TSDM_REG_ENABLE_IN1 0x42238
3219#define TSDM_REG_ENABLE_IN2 0x4223c
3220#define TSDM_REG_ENABLE_OUT1 0x42240
3221#define TSDM_REG_ENABLE_OUT2 0x42244
3222/* [RW 4] The initial number of messages that can be sent to the pxp control
3223 interface without receiving any ACK. */
3224#define TSDM_REG_INIT_CREDIT_PXP_CTRL 0x424bc
3225/* [ST 32] The number of ACK after placement messages received */
3226#define TSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x4227c
3227/* [ST 32] The number of packet end messages received from the parser */
3228#define TSDM_REG_NUM_OF_PKT_END_MSG 0x42274
3229/* [ST 32] The number of requests received from the pxp async if */
3230#define TSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x42278
3231/* [ST 32] The number of commands received in queue 0 */
3232#define TSDM_REG_NUM_OF_Q0_CMD 0x42248
3233/* [ST 32] The number of commands received in queue 10 */
3234#define TSDM_REG_NUM_OF_Q10_CMD 0x4226c
3235/* [ST 32] The number of commands received in queue 11 */
3236#define TSDM_REG_NUM_OF_Q11_CMD 0x42270
3237/* [ST 32] The number of commands received in queue 1 */
3238#define TSDM_REG_NUM_OF_Q1_CMD 0x4224c
3239/* [ST 32] The number of commands received in queue 3 */
3240#define TSDM_REG_NUM_OF_Q3_CMD 0x42250
3241/* [ST 32] The number of commands received in queue 4 */
3242#define TSDM_REG_NUM_OF_Q4_CMD 0x42254
3243/* [ST 32] The number of commands received in queue 5 */
3244#define TSDM_REG_NUM_OF_Q5_CMD 0x42258
3245/* [ST 32] The number of commands received in queue 6 */
3246#define TSDM_REG_NUM_OF_Q6_CMD 0x4225c
3247/* [ST 32] The number of commands received in queue 7 */
3248#define TSDM_REG_NUM_OF_Q7_CMD 0x42260
3249/* [ST 32] The number of commands received in queue 8 */
3250#define TSDM_REG_NUM_OF_Q8_CMD 0x42264
3251/* [ST 32] The number of commands received in queue 9 */
3252#define TSDM_REG_NUM_OF_Q9_CMD 0x42268
3253/* [RW 13] The start address in the internal RAM for the packet end message */
3254#define TSDM_REG_PCK_END_MSG_START_ADDR 0x42014
3255/* [RW 13] The start address in the internal RAM for queue counters */
3256#define TSDM_REG_Q_COUNTER_START_ADDR 0x42010
3257/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
3258#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x42548
3259/* [R 1] parser fifo empty in sdm_sync block */
3260#define TSDM_REG_SYNC_PARSER_EMPTY 0x42550
3261/* [R 1] parser serial fifo empty in sdm_sync block */
3262#define TSDM_REG_SYNC_SYNC_EMPTY 0x42558
3263/* [RW 32] Tick for timer counter. Applicable only when
3264 ~tsdm_registers_timer_tick_enable.timer_tick_enable =1 */
3265#define TSDM_REG_TIMER_TICK 0x42000
3266/* [RW 32] Interrupt mask register #0 read/write */
3267#define TSDM_REG_TSDM_INT_MASK_0 0x4229c
3268#define TSDM_REG_TSDM_INT_MASK_1 0x422ac
3269/* [R 32] Interrupt register #0 read */
3270#define TSDM_REG_TSDM_INT_STS_0 0x42290
3271#define TSDM_REG_TSDM_INT_STS_1 0x422a0
3272/* [RW 11] Parity mask register #0 read/write */
3273#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
3274/* [R 11] Parity register #0 read */
3275#define TSDM_REG_TSDM_PRTY_STS 0x422b0
3276/* [RW 5] The number of time_slots in the arbitration cycle */
3277#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
3278/* [RW 3] The source that is associated with arbitration element 0. Source
3279 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3280 sleeping thread with priority 1; 4- sleeping thread with priority 2 */
3281#define TSEM_REG_ARB_ELEMENT0 0x180020
3282/* [RW 3] The source that is associated with arbitration element 1. Source
3283 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3284 sleeping thread with priority 1; 4- sleeping thread with priority 2.
3285 Could not be equal to register ~tsem_registers_arb_element0.arb_element0 */
3286#define TSEM_REG_ARB_ELEMENT1 0x180024
3287/* [RW 3] The source that is associated with arbitration element 2. Source
3288 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3289 sleeping thread with priority 1; 4- sleeping thread with priority 2.
3290 Could not be equal to register ~tsem_registers_arb_element0.arb_element0
3291 and ~tsem_registers_arb_element1.arb_element1 */
3292#define TSEM_REG_ARB_ELEMENT2 0x180028
3293/* [RW 3] The source that is associated with arbitration element 3. Source
3294 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3295 sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
3296 not be equal to register ~tsem_registers_arb_element0.arb_element0 and
3297 ~tsem_registers_arb_element1.arb_element1 and
3298 ~tsem_registers_arb_element2.arb_element2 */
3299#define TSEM_REG_ARB_ELEMENT3 0x18002c
3300/* [RW 3] The source that is associated with arbitration element 4. Source
3301 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3302 sleeping thread with priority 1; 4- sleeping thread with priority 2.
3303 Could not be equal to register ~tsem_registers_arb_element0.arb_element0
3304 and ~tsem_registers_arb_element1.arb_element1 and
3305 ~tsem_registers_arb_element2.arb_element2 and
3306 ~tsem_registers_arb_element3.arb_element3 */
3307#define TSEM_REG_ARB_ELEMENT4 0x180030
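
The arb_element descriptions above impose two constraints: each element
selects one of the five sources (0..4), and no two programmed elements may
select the same source. A small stand-alone validation sketch of those rules
(not driver code):

#include <stdbool.h>

/* Check a proposed TSEM arbitration element configuration: every element
 * must carry a legal source code (0..4) and no source may repeat. */
bool tsem_arb_elements_valid(const unsigned int elem[5])
{
	bool used[5] = { false, false, false, false, false };
	int i;

	for (i = 0; i < 5; i++) {
		if (elem[i] > 4 || used[elem[i]])
			return false;
		used[elem[i]] = true;
	}
	return true;
}
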
3308#define TSEM_REG_ENABLE_IN 0x1800a4
3309#define TSEM_REG_ENABLE_OUT 0x1800a8
3310/* [RW 32] This address space contains all registers and memories that are
3311 placed in SEM_FAST block. The SEM_FAST registers are described in
3312 appendix B. In order to access the sem_fast registers the base address
3313 ~fast_memory.fast_memory should be added to each sem_fast register offset. */
3314#define TSEM_REG_FAST_MEMORY 0x1a0000
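
Per the comment above, a SEM_FAST register is reached by adding the owning
SEM block's fast-memory base to the SEM_FAST register offset; a one-line
sketch of that address computation (the helper name is ours):

#include <stdint.h>

#define TSEM_REG_FAST_MEMORY	0x1a0000

/* Absolute offset of a SEM_FAST register inside TSEM. */
uint32_t tsem_fast_reg(uint32_t sem_fast_offset)
{
	return TSEM_REG_FAST_MEMORY + sem_fast_offset;
}
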
3315/* [RW 1] Disables input messages from FIC0. May be updated during run_time
3316 by the microcode */
3317#define TSEM_REG_FIC0_DISABLE 0x180224
3318/* [RW 1] Disables input messages from FIC1. May be updated during run_time
3319 by the microcode */
3320#define TSEM_REG_FIC1_DISABLE 0x180234
3321/* [RW 15] Interrupt table. Read and write access to it is not possible in
3322 the middle of the work */
3323#define TSEM_REG_INT_TABLE 0x180400
3324/* [ST 24] Statistics register. The number of messages that entered through
3325 FIC0 */
3326#define TSEM_REG_MSG_NUM_FIC0 0x180000
3327/* [ST 24] Statistics register. The number of messages that entered through
3328 FIC1 */
3329#define TSEM_REG_MSG_NUM_FIC1 0x180004
3330/* [ST 24] Statistics register. The number of messages that were sent to
3331 FOC0 */
3332#define TSEM_REG_MSG_NUM_FOC0 0x180008
3333/* [ST 24] Statistics register. The number of messages that were sent to
3334 FOC1 */
3335#define TSEM_REG_MSG_NUM_FOC1 0x18000c
3336/* [ST 24] Statistics register. The number of messages that were sent to
3337 FOC2 */
3338#define TSEM_REG_MSG_NUM_FOC2 0x180010
3339/* [ST 24] Statistics register. The number of messages that were sent to
3340 FOC3 */
3341#define TSEM_REG_MSG_NUM_FOC3 0x180014
3342/* [RW 1] Disables input messages from the passive buffer. May be updated
3343 during run_time by the microcode */
3344#define TSEM_REG_PAS_DISABLE 0x18024c
3345/* [WB 128] Debug only. Passive buffer memory */
3346#define TSEM_REG_PASSIVE_BUFFER 0x181000
3347/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
3348#define TSEM_REG_PRAM 0x1c0000
3349/* [R 8] Valid sleeping threads indication; one bit per thread */
3350#define TSEM_REG_SLEEP_THREADS_VALID 0x18026c
3351/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
3352#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
3353/* [RW 8] List of free threads. There is a bit per thread. */
3354#define TSEM_REG_THREADS_LIST 0x1802e4
3355/* [RW 3] The arbitration scheme of time_slot 0 */
3356#define TSEM_REG_TS_0_AS 0x180038
3357/* [RW 3] The arbitration scheme of time_slot 10 */
3358#define TSEM_REG_TS_10_AS 0x180060
3359/* [RW 3] The arbitration scheme of time_slot 11 */
3360#define TSEM_REG_TS_11_AS 0x180064
3361/* [RW 3] The arbitration scheme of time_slot 12 */
3362#define TSEM_REG_TS_12_AS 0x180068
3363/* [RW 3] The arbitration scheme of time_slot 13 */
3364#define TSEM_REG_TS_13_AS 0x18006c
3365/* [RW 3] The arbitration scheme of time_slot 14 */
3366#define TSEM_REG_TS_14_AS 0x180070
3367/* [RW 3] The arbitration scheme of time_slot 15 */
3368#define TSEM_REG_TS_15_AS 0x180074
3369/* [RW 3] The arbitration scheme of time_slot 16 */
3370#define TSEM_REG_TS_16_AS 0x180078
3371/* [RW 3] The arbitration scheme of time_slot 17 */
3372#define TSEM_REG_TS_17_AS 0x18007c
3373/* [RW 3] The arbitration scheme of time_slot 18 */
3374#define TSEM_REG_TS_18_AS 0x180080
3375/* [RW 3] The arbitration scheme of time_slot 1 */
3376#define TSEM_REG_TS_1_AS 0x18003c
3377/* [RW 3] The arbitration scheme of time_slot 2 */
3378#define TSEM_REG_TS_2_AS 0x180040
3379/* [RW 3] The arbitration scheme of time_slot 3 */
3380#define TSEM_REG_TS_3_AS 0x180044
3381/* [RW 3] The arbitration scheme of time_slot 4 */
3382#define TSEM_REG_TS_4_AS 0x180048
3383/* [RW 3] The arbitration scheme of time_slot 5 */
3384#define TSEM_REG_TS_5_AS 0x18004c
3385/* [RW 3] The arbitration scheme of time_slot 6 */
3386#define TSEM_REG_TS_6_AS 0x180050
3387/* [RW 3] The arbitration scheme of time_slot 7 */
3388#define TSEM_REG_TS_7_AS 0x180054
3389/* [RW 3] The arbitration scheme of time_slot 8 */
3390#define TSEM_REG_TS_8_AS 0x180058
3391/* [RW 3] The arbitration scheme of time_slot 9 */
3392#define TSEM_REG_TS_9_AS 0x18005c
3393/* [RW 32] Interrupt mask register #0 read/write */
3394#define TSEM_REG_TSEM_INT_MASK_0 0x180100
3395#define TSEM_REG_TSEM_INT_MASK_1 0x180110
3396/* [R 32] Interrupt register #0 read */
3397#define TSEM_REG_TSEM_INT_STS_0 0x1800f4
3398#define TSEM_REG_TSEM_INT_STS_1 0x180104
3399/* [RW 32] Parity mask register #0 read/write */
3400#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120
3401#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130
3402/* [R 32] Parity register #0 read */
3403#define TSEM_REG_TSEM_PRTY_STS_0 0x180114
3404#define TSEM_REG_TSEM_PRTY_STS_1 0x180124
3405/* [R 5] Used to read the XX protection CAM occupancy counter. */
3406#define UCM_REG_CAM_OCCUP 0xe0170
3407/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
3408 disregarded; valid output is deasserted; all other signals are treated as
3409 usual; if 1 - normal activity. */
3410#define UCM_REG_CDU_AG_RD_IFEN 0xe0038
3411/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
3412 are disregarded; all other signals are treated as usual; if 1 - normal
3413 activity. */
3414#define UCM_REG_CDU_AG_WR_IFEN 0xe0034
3415/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
3416 disregarded; valid output is deasserted; all other signals are treated as
3417 usual; if 1 - normal activity. */
3418#define UCM_REG_CDU_SM_RD_IFEN 0xe0040
3419/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
3420 input is disregarded; all other signals are treated as usual; if 1 -
3421 normal activity. */
3422#define UCM_REG_CDU_SM_WR_IFEN 0xe003c
3423/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
3424 the initial credit value; read returns the current value of the credit
3425 counter. Must be initialized to 1 at start-up. */
3426#define UCM_REG_CFC_INIT_CRD 0xe0204
3427/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
3428 weight 8 (the most prioritised); 1 stands for weight 1 (least
3429 prioritised); 2 stands for weight 2; etc. */
3430#define UCM_REG_CP_WEIGHT 0xe00c4
3431/* [RW 1] Input csem Interface enable. If 0 - the valid input is
3432 disregarded; acknowledge output is deasserted; all other signals are
3433 treated as usual; if 1 - normal activity. */
3434#define UCM_REG_CSEM_IFEN 0xe0028
3435/* [RC 1] Set when the message length mismatch (relative to last indication)
3436 at the csem interface is detected. */
3437#define UCM_REG_CSEM_LENGTH_MIS 0xe0160
3438/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
3439 weight 8 (the most prioritised); 1 stands for weight 1 (least
3440 prioritised); 2 stands for weight 2; etc. */
3441#define UCM_REG_CSEM_WEIGHT 0xe00b8
3442/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
3443 disregarded; acknowledge output is deasserted; all other signals are
3444 treated as usual; if 1 - normal activity. */
3445#define UCM_REG_DORQ_IFEN 0xe0030
3446/* [RC 1] Set when the message length mismatch (relative to last indication)
3447 at the dorq interface is detected. */
3448#define UCM_REG_DORQ_LENGTH_MIS 0xe0168
3449/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
3450 weight 8 (the most prioritised); 1 stands for weight 1 (least
3451 prioritised); 2 stands for weight 2; etc. */
3452#define UCM_REG_DORQ_WEIGHT 0xe00c0
3453/* [RW 8] The Event ID in case ErrorFlg input message bit is set. */
3454#define UCM_REG_ERR_EVNT_ID 0xe00a4
3455/* [RW 28] The CM erroneous header for QM and Timers formatting. */
3456#define UCM_REG_ERR_UCM_HDR 0xe00a0
3457/* [RW 8] The Event ID for Timers expiration. */
3458#define UCM_REG_EXPR_EVNT_ID 0xe00a8
3459/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
3460 writes the initial credit value; read returns the current value of the
3461 credit counter. Must be initialized to 64 at start-up. */
3462#define UCM_REG_FIC0_INIT_CRD 0xe020c
3463/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
3464 writes the initial credit value; read returns the current value of the
3465 credit counter. Must be initialized to 64 at start-up. */
3466#define UCM_REG_FIC1_INIT_CRD 0xe0210
3467/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
3468 - strict priority defined by ~ucm_registers_gr_ag_pr.gr_ag_pr;
3469 ~ucm_registers_gr_ld0_pr.gr_ld0_pr and
3470 ~ucm_registers_gr_ld1_pr.gr_ld1_pr. */
3471#define UCM_REG_GR_ARB_TYPE 0xe0144
3472/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
3473 highest priority is 3. It is supposed that the Store channel group is
3474 complement to the others. */
3475#define UCM_REG_GR_LD0_PR 0xe014c
3476/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
3477 highest priority is 3. It is supposed that the Store channel group is
3478 complement to the others. */
3479#define UCM_REG_GR_LD1_PR 0xe0150
3480/* [RW 2] The queue index for invalidate counter flag decision. */
3481#define UCM_REG_INV_CFLG_Q 0xe00e4
3482/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
3483 sent to STORM; for a specific connection type. The double REG-pairs are
3484 used in order to align to STORM context row size of 128 bits. The offset
3485 of these data in the STORM context is always 0. Index _i stands for the
3486 connection type (one of 16). */
3487#define UCM_REG_N_SM_CTX_LD_0 0xe0054
3488#define UCM_REG_N_SM_CTX_LD_1 0xe0058
3489#define UCM_REG_N_SM_CTX_LD_2 0xe005c
3490#define UCM_REG_N_SM_CTX_LD_3 0xe0060
3491#define UCM_REG_N_SM_CTX_LD_4 0xe0064
3492#define UCM_REG_N_SM_CTX_LD_5 0xe0068
3493#define UCM_REG_PHYS_QNUM0_0 0xe0110
3494#define UCM_REG_PHYS_QNUM0_1 0xe0114
3495#define UCM_REG_PHYS_QNUM1_0 0xe0118
3496#define UCM_REG_PHYS_QNUM1_1 0xe011c
3497#define UCM_REG_PHYS_QNUM2_0 0xe0120
3498#define UCM_REG_PHYS_QNUM2_1 0xe0124
3499#define UCM_REG_PHYS_QNUM3_0 0xe0128
3500#define UCM_REG_PHYS_QNUM3_1 0xe012c
3501/* [RW 8] The Event ID for Timers formatting in case of stop done. */
3502#define UCM_REG_STOP_EVNT_ID 0xe00ac
3503/* [RC 1] Set when the message length mismatch (relative to last indication)
3504 at the STORM interface is detected. */
3505#define UCM_REG_STORM_LENGTH_MIS 0xe0154
3506/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
3507 disregarded; acknowledge output is deasserted; all other signals are
3508 treated as usual; if 1 - normal activity. */
3509#define UCM_REG_STORM_UCM_IFEN 0xe0010
3510/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
3511 weight 8 (the most prioritised); 1 stands for weight 1 (least
3512 prioritised); 2 stands for weight 2; etc. */
3513#define UCM_REG_STORM_WEIGHT 0xe00b0
3514/* [RW 4] Timers output initial credit. Max credit available - 15. Write
3515 writes the initial credit value; read returns the current value of the
3516 credit counter. Must be initialized to 4 at start-up. */
3517#define UCM_REG_TM_INIT_CRD 0xe021c
3518/* [RW 28] The CM header for Timers expiration command. */
3519#define UCM_REG_TM_UCM_HDR 0xe009c
3520/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
3521 disregarded; acknowledge output is deasserted; all other signals are
3522 treated as usual; if 1 - normal activity. */
3523#define UCM_REG_TM_UCM_IFEN 0xe001c
3524/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
3525 weight 8 (the most prioritised); 1 stands for weight 1 (least
3526 prioritised); 2 stands for weight 2; etc. */
3527#define UCM_REG_TM_WEIGHT 0xe00d4
3528/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
3529 disregarded; acknowledge output is deasserted; all other signals are
3530 treated as usual; if 1 - normal activity. */
3531#define UCM_REG_TSEM_IFEN 0xe0024
3532/* [RC 1] Set when the message length mismatch (relative to last indication)
3533 at the tsem interface is detected. */
3534#define UCM_REG_TSEM_LENGTH_MIS 0xe015c
3535/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
3536 weight 8 (the most prioritised); 1 stands for weight 1 (least
3537 prioritised); 2 stands for weight 2; etc. */
3538#define UCM_REG_TSEM_WEIGHT 0xe00b4
3539/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
3540 acknowledge output is deasserted; all other signals are treated as usual;
3541 if 1 - normal activity. */
3542#define UCM_REG_UCM_CFC_IFEN 0xe0044
3543/* [RW 11] Interrupt mask register #0 read/write */
3544#define UCM_REG_UCM_INT_MASK 0xe01d4
3545/* [R 11] Interrupt register #0 read */
3546#define UCM_REG_UCM_INT_STS 0xe01c8
3547/* [R 27] Parity register #0 read */
3548#define UCM_REG_UCM_PRTY_STS 0xe01d8
3549/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
3550 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
3551 Is used to determine the number of the AG context REG-pairs written back;
3552 when the Reg1WbFlg isn't set. */
3553#define UCM_REG_UCM_REG0_SZ 0xe00dc
3554/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
3555 disregarded; valid is deasserted; all other signals are treated as usual;
3556 if 1 - normal activity. */
3557#define UCM_REG_UCM_STORM0_IFEN 0xe0004
3558/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
3559 disregarded; valid is deasserted; all other signals are treated as usual;
3560 if 1 - normal activity. */
3561#define UCM_REG_UCM_STORM1_IFEN 0xe0008
3562/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
3563 disregarded; acknowledge output is deasserted; all other signals are
3564 treated as usual; if 1 - normal activity. */
3565#define UCM_REG_UCM_TM_IFEN 0xe0020
3566/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
3567 disregarded; valid is deasserted; all other signals are treated as usual;
3568 if 1 - normal activity. */
3569#define UCM_REG_UCM_UQM_IFEN 0xe000c
3570/* [RW 1] If set; the Q index received from the QM is inserted into the event ID. */
3571#define UCM_REG_UCM_UQM_USE_Q 0xe00d8
3572/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
3573 the initial credit value; read returns the current value of the credit
3574 counter. Must be initialized to 32 at start-up. */
3575#define UCM_REG_UQM_INIT_CRD 0xe0220
3576/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
3577 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
3578 prioritised); 2 stands for weight 2; etc. */
3579#define UCM_REG_UQM_P_WEIGHT 0xe00cc
3580/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
3581 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
3582 prioritised); 2 stands for weight 2; etc. */
3583#define UCM_REG_UQM_S_WEIGHT 0xe00d0
3584/* [RW 28] The CM header value for QM request (primary). */
3585#define UCM_REG_UQM_UCM_HDR_P 0xe0094
3586/* [RW 28] The CM header value for QM request (secondary). */
3587#define UCM_REG_UQM_UCM_HDR_S 0xe0098
3588/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
3589 acknowledge output is deasserted; all other signals are treated as usual;
3590 if 1 - normal activity. */
3591#define UCM_REG_UQM_UCM_IFEN 0xe0014
3592/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
3593 acknowledge output is deasserted; all other signals are treated as usual;
3594 if 1 - normal activity. */
3595#define UCM_REG_USDM_IFEN 0xe0018
3596/* [RC 1] Set when the message length mismatch (relative to last indication)
3597 at the SDM interface is detected. */
3598#define UCM_REG_USDM_LENGTH_MIS 0xe0158
3599/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
3600 weight 8 (the most prioritised); 1 stands for weight 1 (least
3601 prioritised); 2 stands for weight 2; etc. */
3602#define UCM_REG_USDM_WEIGHT 0xe00c8
3603/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
3604 disregarded; acknowledge output is deasserted; all other signals are
3605 treated as usual; if 1 - normal activity. */
3606#define UCM_REG_XSEM_IFEN 0xe002c
3607/* [RC 1] Set when the message length mismatch (relative to last indication)
3608 at the xsem interface is detected. */
3609#define UCM_REG_XSEM_LENGTH_MIS 0xe0164
3610/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
3611 weight 8 (the most prioritised); 1 stands for weight 1 (least
3612 prioritised); 2 stands for weight 2; etc. */
3613#define UCM_REG_XSEM_WEIGHT 0xe00bc
3614/* [RW 20] Indirect access to the descriptor table of the XX protection
3615 mechanism. The fields are: [5:0] - message length; [14:6] - message
3616 pointer; [19:15] - next pointer. */
3617#define UCM_REG_XX_DESCR_TABLE 0xe0280
3618#define UCM_REG_XX_DESCR_TABLE_SIZE 32
3619/* [R 6] Used to read the XX protection Free counter. */
3620#define UCM_REG_XX_FREE 0xe016c
3621/* [RW 6] Initial value for the credit counter; responsible for fulfilling
3622 of the Input Stage XX protection buffer by the XX protection pending
3623 messages. Write writes the initial credit value; read returns the current
3624 value of the credit counter. Must be initialized to 12 at start-up. */
3625#define UCM_REG_XX_INIT_CRD 0xe0224
3626/* [RW 6] The maximum number of pending messages; which may be stored in XX
3627 protection. ~ucm_registers_xx_free.xx_free is read on read. */
3628#define UCM_REG_XX_MSG_NUM 0xe0228
3629/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
3630#define UCM_REG_XX_OVFL_EVNT_ID 0xe004c
3631/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
3632 The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
3633 header pointer. */
3634#define UCM_REG_XX_TABLE 0xe0300
3635/* [RW 8] The event id for aggregated interrupt 0 */
3636#define USDM_REG_AGG_INT_EVENT_0 0xc4038
3637#define USDM_REG_AGG_INT_EVENT_1 0xc403c
3638#define USDM_REG_AGG_INT_EVENT_2 0xc4040
3639#define USDM_REG_AGG_INT_EVENT_4 0xc4048
3640#define USDM_REG_AGG_INT_EVENT_5 0xc404c
3641#define USDM_REG_AGG_INT_EVENT_6 0xc4050
3642/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
3643 or auto-mask-mode (1) */
3644#define USDM_REG_AGG_INT_MODE_0 0xc41b8
3645#define USDM_REG_AGG_INT_MODE_1 0xc41bc
3646#define USDM_REG_AGG_INT_MODE_4 0xc41c8
3647#define USDM_REG_AGG_INT_MODE_5 0xc41cc
3648#define USDM_REG_AGG_INT_MODE_6 0xc41d0
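
The AGG_INT_EVENT_* and AGG_INT_MODE_* entries sit at a 4-byte stride from
their respective index-0 registers (the listed offsets bear this out), so the
register for aggregated interrupt i can be computed rather than looked up.
A sketch under that stride assumption:

#include <stdint.h>

#define USDM_REG_AGG_INT_EVENT_0	0xc4038
#define USDM_REG_AGG_INT_MODE_0		0xc41b8

/* Event-ID register for USDM aggregated interrupt 'idx'. */
uint32_t usdm_agg_int_event_reg(unsigned int idx)
{
	return USDM_REG_AGG_INT_EVENT_0 + 4 * idx;
}

/* Mode register (0 = normal, 1 = auto-mask) for aggregated interrupt 'idx'. */
uint32_t usdm_agg_int_mode_reg(unsigned int idx)
{
	return USDM_REG_AGG_INT_MODE_0 + 4 * idx;
}
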
3649/* [RW 1] The T bit for aggregated interrupt 5 */
3650#define USDM_REG_AGG_INT_T_5 0xc40cc
3651#define USDM_REG_AGG_INT_T_6 0xc40d0
3652/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
3653#define USDM_REG_CFC_RSP_START_ADDR 0xc4008
3654/* [RW 16] The maximum value of the completion counter #0 */
3655#define USDM_REG_CMP_COUNTER_MAX0 0xc401c
3656/* [RW 16] The maximum value of the completion counter #1 */
3657#define USDM_REG_CMP_COUNTER_MAX1 0xc4020
3658/* [RW 16] The maximum value of the completion counter #2 */
3659#define USDM_REG_CMP_COUNTER_MAX2 0xc4024
3660/* [RW 16] The maximum value of the completion counter #3 */
3661#define USDM_REG_CMP_COUNTER_MAX3 0xc4028
3662/* [RW 13] The start address in the internal RAM for the completion
3663 counters. */
3664#define USDM_REG_CMP_COUNTER_START_ADDR 0xc400c
3665#define USDM_REG_ENABLE_IN1 0xc4238
3666#define USDM_REG_ENABLE_IN2 0xc423c
3667#define USDM_REG_ENABLE_OUT1 0xc4240
3668#define USDM_REG_ENABLE_OUT2 0xc4244
3669/* [RW 4] The initial number of messages that can be sent to the pxp control
3670 interface without receiving any ACK. */
3671#define USDM_REG_INIT_CREDIT_PXP_CTRL 0xc44c0
3672/* [ST 32] The number of ACK after placement messages received */
3673#define USDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc4280
3674/* [ST 32] The number of packet end messages received from the parser */
3675#define USDM_REG_NUM_OF_PKT_END_MSG 0xc4278
3676/* [ST 32] The number of requests received from the pxp async if */
3677#define USDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc427c
3678/* [ST 32] The number of commands received in queue 0 */
3679#define USDM_REG_NUM_OF_Q0_CMD 0xc4248
3680/* [ST 32] The number of commands received in queue 10 */
3681#define USDM_REG_NUM_OF_Q10_CMD 0xc4270
3682/* [ST 32] The number of commands received in queue 11 */
3683#define USDM_REG_NUM_OF_Q11_CMD 0xc4274
3684/* [ST 32] The number of commands received in queue 1 */
3685#define USDM_REG_NUM_OF_Q1_CMD 0xc424c
3686/* [ST 32] The number of commands received in queue 2 */
3687#define USDM_REG_NUM_OF_Q2_CMD 0xc4250
3688/* [ST 32] The number of commands received in queue 3 */
3689#define USDM_REG_NUM_OF_Q3_CMD 0xc4254
3690/* [ST 32] The number of commands received in queue 4 */
3691#define USDM_REG_NUM_OF_Q4_CMD 0xc4258
3692/* [ST 32] The number of commands received in queue 5 */
3693#define USDM_REG_NUM_OF_Q5_CMD 0xc425c
3694/* [ST 32] The number of commands received in queue 6 */
3695#define USDM_REG_NUM_OF_Q6_CMD 0xc4260
3696/* [ST 32] The number of commands received in queue 7 */
3697#define USDM_REG_NUM_OF_Q7_CMD 0xc4264
3698/* [ST 32] The number of commands received in queue 8 */
3699#define USDM_REG_NUM_OF_Q8_CMD 0xc4268
3700/* [ST 32] The number of commands received in queue 9 */
3701#define USDM_REG_NUM_OF_Q9_CMD 0xc426c
3702/* [RW 13] The start address in the internal RAM for the packet end message */
3703#define USDM_REG_PCK_END_MSG_START_ADDR 0xc4014
3704/* [RW 13] The start address in the internal RAM for queue counters */
3705#define USDM_REG_Q_COUNTER_START_ADDR 0xc4010
3706/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
3707#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc4550
3708/* [R 1] parser fifo empty in sdm_sync block */
3709#define USDM_REG_SYNC_PARSER_EMPTY 0xc4558
3710/* [R 1] parser serial fifo empty in sdm_sync block */
3711#define USDM_REG_SYNC_SYNC_EMPTY 0xc4560
3712/* [RW 32] Tick for timer counter. Applicable only when
3713 ~usdm_registers_timer_tick_enable.timer_tick_enable =1 */
3714#define USDM_REG_TIMER_TICK 0xc4000
3715/* [RW 32] Interrupt mask register #0 read/write */
3716#define USDM_REG_USDM_INT_MASK_0 0xc42a0
3717#define USDM_REG_USDM_INT_MASK_1 0xc42b0
3718/* [R 32] Interrupt register #0 read */
3719#define USDM_REG_USDM_INT_STS_0 0xc4294
3720#define USDM_REG_USDM_INT_STS_1 0xc42a4
3721/* [RW 11] Parity mask register #0 read/write */
3722#define USDM_REG_USDM_PRTY_MASK 0xc42c0
3723/* [R 11] Parity register #0 read */
3724#define USDM_REG_USDM_PRTY_STS 0xc42b4
3725/* [RW 5] The number of time_slots in the arbitration cycle */
3726#define USEM_REG_ARB_CYCLE_SIZE 0x300034
3727/* [RW 3] The source that is associated with arbitration element 0. Source
3728 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3729 sleeping thread with priority 1; 4- sleeping thread with priority 2 */
3730#define USEM_REG_ARB_ELEMENT0 0x300020
3731/* [RW 3] The source that is associated with arbitration element 1. Source
3732 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3733 sleeping thread with priority 1; 4- sleeping thread with priority 2.
3734 Could not be equal to register ~usem_registers_arb_element0.arb_element0 */
3735#define USEM_REG_ARB_ELEMENT1 0x300024
3736/* [RW 3] The source that is associated with arbitration element 2. Source
3737 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3738 sleeping thread with priority 1; 4- sleeping thread with priority 2.
3739 Could not be equal to register ~usem_registers_arb_element0.arb_element0
3740 and ~usem_registers_arb_element1.arb_element1 */
3741#define USEM_REG_ARB_ELEMENT2 0x300028
3742/* [RW 3] The source that is associated with arbitration element 3. Source
3743 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3744 sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
3745 not be equal to register ~usem_registers_arb_element0.arb_element0 and
3746 ~usem_registers_arb_element1.arb_element1 and
3747 ~usem_registers_arb_element2.arb_element2 */
3748#define USEM_REG_ARB_ELEMENT3 0x30002c
3749/* [RW 3] The source that is associated with arbitration element 4. Source
3750 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
3751 sleeping thread with priority 1; 4- sleeping thread with priority 2.
3752 Could not be equal to register ~usem_registers_arb_element0.arb_element0
3753 and ~usem_registers_arb_element1.arb_element1 and
3754 ~usem_registers_arb_element2.arb_element2 and
3755 ~usem_registers_arb_element3.arb_element3 */
3756#define USEM_REG_ARB_ELEMENT4 0x300030
3757#define USEM_REG_ENABLE_IN 0x3000a4
3758#define USEM_REG_ENABLE_OUT 0x3000a8
3759/* [RW 32] This address space contains all registers and memories that are
3760 placed in SEM_FAST block. The SEM_FAST registers are described in
3761 appendix B. In order to access the sem_fast registers the base address
3762 ~fast_memory.fast_memory should be added to each sem_fast register offset. */
3763#define USEM_REG_FAST_MEMORY 0x320000
3764/* [RW 1] Disables input messages from FIC0. May be updated during run_time
3765 by the microcode */
3766#define USEM_REG_FIC0_DISABLE 0x300224
3767/* [RW 1] Disables input messages from FIC1. May be updated during run_time
3768 by the microcode */
3769#define USEM_REG_FIC1_DISABLE 0x300234
3770/* [RW 15] Interrupt table. Read and write access to it is not possible in
3771 the middle of the work */
3772#define USEM_REG_INT_TABLE 0x300400
3773/* [ST 24] Statistics register. The number of messages that entered through
3774 FIC0 */
3775#define USEM_REG_MSG_NUM_FIC0 0x300000
3776/* [ST 24] Statistics register. The number of messages that entered through
3777 FIC1 */
3778#define USEM_REG_MSG_NUM_FIC1 0x300004
3779/* [ST 24] Statistics register. The number of messages that were sent to
3780 FOC0 */
3781#define USEM_REG_MSG_NUM_FOC0 0x300008
3782/* [ST 24] Statistics register. The number of messages that were sent to
3783 FOC1 */
3784#define USEM_REG_MSG_NUM_FOC1 0x30000c
3785/* [ST 24] Statistics register. The number of messages that were sent to
3786 FOC2 */
3787#define USEM_REG_MSG_NUM_FOC2 0x300010
3788/* [ST 24] Statistics register. The number of messages that were sent to
3789 FOC3 */
3790#define USEM_REG_MSG_NUM_FOC3 0x300014
3791/* [RW 1] Disables input messages from the passive buffer. May be updated
3792 during run_time by the microcode */
3793#define USEM_REG_PAS_DISABLE 0x30024c
3794/* [WB 128] Debug only. Passive buffer memory */
3795#define USEM_REG_PASSIVE_BUFFER 0x302000
3796/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
3797#define USEM_REG_PRAM 0x340000
3798/* [R 16] Valid sleeping threads indication; one bit per thread */
3799#define USEM_REG_SLEEP_THREADS_VALID 0x30026c
3800/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
3801#define USEM_REG_SLOW_EXT_STORE_EMPTY 0x3002a0
3802/* [RW 16] List of free threads. There is a bit per thread. */
3803#define USEM_REG_THREADS_LIST 0x3002e4
3804/* [RW 3] The arbitration scheme of time_slot 0 */
3805#define USEM_REG_TS_0_AS 0x300038
3806/* [RW 3] The arbitration scheme of time_slot 10 */
3807#define USEM_REG_TS_10_AS 0x300060
3808/* [RW 3] The arbitration scheme of time_slot 11 */
3809#define USEM_REG_TS_11_AS 0x300064
3810/* [RW 3] The arbitration scheme of time_slot 12 */
3811#define USEM_REG_TS_12_AS 0x300068
3812/* [RW 3] The arbitration scheme of time_slot 13 */
3813#define USEM_REG_TS_13_AS 0x30006c
3814/* [RW 3] The arbitration scheme of time_slot 14 */
3815#define USEM_REG_TS_14_AS 0x300070
3816/* [RW 3] The arbitration scheme of time_slot 15 */
3817#define USEM_REG_TS_15_AS 0x300074
3818/* [RW 3] The arbitration scheme of time_slot 16 */
3819#define USEM_REG_TS_16_AS 0x300078
3820/* [RW 3] The arbitration scheme of time_slot 17 */
3821#define USEM_REG_TS_17_AS 0x30007c
3822/* [RW 3] The arbitration scheme of time_slot 18 */
3823#define USEM_REG_TS_18_AS 0x300080
3824/* [RW 3] The arbitration scheme of time_slot 1 */
3825#define USEM_REG_TS_1_AS 0x30003c
3826/* [RW 3] The arbitration scheme of time_slot 2 */
3827#define USEM_REG_TS_2_AS 0x300040
3828/* [RW 3] The arbitration scheme of time_slot 3 */
3829#define USEM_REG_TS_3_AS 0x300044
3830/* [RW 3] The arbitration scheme of time_slot 4 */
3831#define USEM_REG_TS_4_AS 0x300048
3832/* [RW 3] The arbitration scheme of time_slot 5 */
3833#define USEM_REG_TS_5_AS 0x30004c
3834/* [RW 3] The arbitration scheme of time_slot 6 */
3835#define USEM_REG_TS_6_AS 0x300050
3836/* [RW 3] The arbitration scheme of time_slot 7 */
3837#define USEM_REG_TS_7_AS 0x300054
3838/* [RW 3] The arbitration scheme of time_slot 8 */
3839#define USEM_REG_TS_8_AS 0x300058
3840/* [RW 3] The arbitration scheme of time_slot 9 */
3841#define USEM_REG_TS_9_AS 0x30005c
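
The per-time-slot arbitration scheme registers likewise sit at a 4-byte
stride from TS_0_AS, so the register for slot n (0..18 here, bounded by
ARB_CYCLE_SIZE) is base + 4*n; a sketch under that assumption, which the
TS_*_AS offsets listed above bear out:

#include <stdint.h>

#define USEM_REG_TS_0_AS	0x300038

/* Arbitration-scheme register for USEM time slot 'slot' (0..18). */
uint32_t usem_ts_as_reg(unsigned int slot)
{
	return USEM_REG_TS_0_AS + 4 * slot;
}
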
3842/* [RW 32] Interrupt mask register #0 read/write */
3843#define USEM_REG_USEM_INT_MASK_0 0x300110
3844#define USEM_REG_USEM_INT_MASK_1 0x300120
3845/* [R 32] Interrupt register #0 read */
3846#define USEM_REG_USEM_INT_STS_0 0x300104
3847#define USEM_REG_USEM_INT_STS_1 0x300114
3848/* [RW 32] Parity mask register #0 read/write */
3849#define USEM_REG_USEM_PRTY_MASK_0 0x300130
3850#define USEM_REG_USEM_PRTY_MASK_1 0x300140
3851/* [R 32] Parity register #0 read */
3852#define USEM_REG_USEM_PRTY_STS_0 0x300124
3853#define USEM_REG_USEM_PRTY_STS_1 0x300134
3854/* [RW 2] The queue index for registration on Aux1 counter flag. */
3855#define XCM_REG_AUX1_Q 0x20134
3856/* [RW 2] Per each decision rule the queue index to register to. */
3857#define XCM_REG_AUX_CNT_FLG_Q_19 0x201b0
3858/* [R 5] Used to read the XX protection CAM occupancy counter. */
3859#define XCM_REG_CAM_OCCUP 0x20244
3860/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
3861 disregarded; valid output is deasserted; all other signals are treated as
3862 usual; if 1 - normal activity. */
3863#define XCM_REG_CDU_AG_RD_IFEN 0x20044
3864/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
3865 are disregarded; all other signals are treated as usual; if 1 - normal
3866 activity. */
3867#define XCM_REG_CDU_AG_WR_IFEN 0x20040
3868/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
3869 disregarded; valid output is deasserted; all other signals are treated as
3870 usual; if 1 - normal activity. */
3871#define XCM_REG_CDU_SM_RD_IFEN 0x2004c
3872/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
3873 input is disregarded; all other signals are treated as usual; if 1 -
3874 normal activity. */
3875#define XCM_REG_CDU_SM_WR_IFEN 0x20048
3876/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
3877 the initial credit value; read returns the current value of the credit
3878 counter. Must be initialized to 1 at start-up. */
3879#define XCM_REG_CFC_INIT_CRD 0x20404
3880/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
3881 weight 8 (the most prioritised); 1 stands for weight 1 (least
3882 prioritised); 2 stands for weight 2; etc. */
3883#define XCM_REG_CP_WEIGHT 0x200dc
3884/* [RW 1] Input csem Interface enable. If 0 - the valid input is
3885 disregarded; acknowledge output is deasserted; all other signals are
3886 treated as usual; if 1 - normal activity. */
3887#define XCM_REG_CSEM_IFEN 0x20028
3888/* [RC 1] Set at message length mismatch (relative to last indication) at
3889 the csem interface. */
3890#define XCM_REG_CSEM_LENGTH_MIS 0x20228
3891/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
3892 weight 8 (the most prioritised); 1 stands for weight 1 (least
3893 prioritised); 2 stands for weight 2; etc. */
3894#define XCM_REG_CSEM_WEIGHT 0x200c4
3895/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
3896 disregarded; acknowledge output is deasserted; all other signals are
3897 treated as usual; if 1 - normal activity. */
3898#define XCM_REG_DORQ_IFEN 0x20030
3899/* [RC 1] Set at message length mismatch (relative to last indication) at
3900 the dorq interface. */
3901#define XCM_REG_DORQ_LENGTH_MIS 0x20230
3902/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
3903 weight 8 (the most prioritised); 1 stands for weight 1 (least
3904 prioritised); 2 stands for weight 2; etc. */
3905#define XCM_REG_DORQ_WEIGHT 0x200cc
3906/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
3907#define XCM_REG_ERR_EVNT_ID 0x200b0
3908/* [RW 28] The CM erroneous header for QM and Timers formatting. */
3909#define XCM_REG_ERR_XCM_HDR 0x200ac
3910/* [RW 8] The Event ID for Timers expiration. */
3911#define XCM_REG_EXPR_EVNT_ID 0x200b4
3912/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
3913 writes the initial credit value; read returns the current value of the
3914 credit counter. Must be initialized to 64 at start-up. */
3915#define XCM_REG_FIC0_INIT_CRD 0x2040c
3916/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
3917 writes the initial credit value; read returns the current value of the
3918 credit counter. Must be initialized to 64 at start-up. */
3919#define XCM_REG_FIC1_INIT_CRD 0x20410
3920#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0 0x20118
3921#define XCM_REG_GLB_DEL_ACK_MAX_CNT_1 0x2011c
3922#define XCM_REG_GLB_DEL_ACK_TMR_VAL_0 0x20108
3923#define XCM_REG_GLB_DEL_ACK_TMR_VAL_1 0x2010c
3924/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
3925 - strict priority defined by ~xcm_registers_gr_ag_pr.gr_ag_pr;
3926 ~xcm_registers_gr_ld0_pr.gr_ld0_pr and
3927 ~xcm_registers_gr_ld1_pr.gr_ld1_pr. */
3928#define XCM_REG_GR_ARB_TYPE 0x2020c
3929/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
3930 highest priority is 3. It is supposed that the Channel group is the
3931 complement of the other 3 groups. */
3932#define XCM_REG_GR_LD0_PR 0x20214
3933/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
3934 highest priority is 3. It is supposed that the Channel group is the
3935 complement of the other 3 groups. */
3936#define XCM_REG_GR_LD1_PR 0x20218
3937/* [RW 1] Input nig0 Interface enable. If 0 - the valid input is
3938 disregarded; acknowledge output is deasserted; all other signals are
3939 treated as usual; if 1 - normal activity. */
3940#define XCM_REG_NIG0_IFEN 0x20038
3941/* [RC 1] Set at message length mismatch (relative to last indication) at
3942 the nig0 interface. */
3943#define XCM_REG_NIG0_LENGTH_MIS 0x20238
3944/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for
3945 weight 8 (the most prioritised); 1 stands for weight 1 (least
3946 prioritised); 2 stands for weight 2; etc. */
3947#define XCM_REG_NIG0_WEIGHT 0x200d4
3948/* [RW 1] Input nig1 Interface enable. If 0 - the valid input is
3949 disregarded; acknowledge output is deasserted; all other signals are
3950 treated as usual; if 1 - normal activity. */
3951#define XCM_REG_NIG1_IFEN 0x2003c
3952/* [RC 1] Set at message length mismatch (relative to last indication) at
3953 the nig1 interface. */
3954#define XCM_REG_NIG1_LENGTH_MIS 0x2023c
3955/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
3956 sent to STORM; for a specific connection type. The double REG-pairs are
3957 used in order to align to STORM context row size of 128 bits. The offset
3958 of these data in the STORM context is always 0. Index _i stands for the
3959 connection type (one of 16). */
3960#define XCM_REG_N_SM_CTX_LD_0 0x20060
3961#define XCM_REG_N_SM_CTX_LD_1 0x20064
3962#define XCM_REG_N_SM_CTX_LD_2 0x20068
3963#define XCM_REG_N_SM_CTX_LD_3 0x2006c
3964#define XCM_REG_N_SM_CTX_LD_4 0x20070
3965#define XCM_REG_N_SM_CTX_LD_5 0x20074
3966/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
3967 acknowledge output is deasserted; all other signals are treated as usual;
3968 if 1 - normal activity. */
3969#define XCM_REG_PBF_IFEN 0x20034
3970/* [RC 1] Set at message length mismatch (relative to last indication) at
3971 the pbf interface. */
3972#define XCM_REG_PBF_LENGTH_MIS 0x20234
3973/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
3974 weight 8 (the most prioritised); 1 stands for weight 1 (least
3975 prioritised); 2 stands for weight 2; etc. */
3976#define XCM_REG_PBF_WEIGHT 0x200d0
3977#define XCM_REG_PHYS_QNUM3_0 0x20100
3978#define XCM_REG_PHYS_QNUM3_1 0x20104
3979/* [RW 8] The Event ID for Timers formatting in case of stop done. */
3980#define XCM_REG_STOP_EVNT_ID 0x200b8
3981/* [RC 1] Set at message length mismatch (relative to last indication) at
3982 the STORM interface. */
3983#define XCM_REG_STORM_LENGTH_MIS 0x2021c
3984/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
3985 weight 8 (the most prioritised); 1 stands for weight 1 (least
3986 prioritised); 2 stands for weight 2; etc. */
3987#define XCM_REG_STORM_WEIGHT 0x200bc
3988/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
3989 disregarded; acknowledge output is deasserted; all other signals are
3990 treated as usual; if 1 - normal activity. */
3991#define XCM_REG_STORM_XCM_IFEN 0x20010
3992/* [RW 4] Timers output initial credit. Max credit available - 15. Write
3993 writes the initial credit value; read returns the current value of the
3994 credit counter. Must be initialized to 4 at start-up. */
3995#define XCM_REG_TM_INIT_CRD 0x2041c
3996/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
3997 weight 8 (the most prioritised); 1 stands for weight 1 (least
3998 prioritised); 2 stands for weight 2; etc. */
3999#define XCM_REG_TM_WEIGHT 0x200ec
4000/* [RW 28] The CM header for Timers expiration command. */
4001#define XCM_REG_TM_XCM_HDR 0x200a8
4002/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
4003 disregarded; acknowledge output is deasserted; all other signals are
4004 treated as usual; if 1 - normal activity. */
4005#define XCM_REG_TM_XCM_IFEN 0x2001c
4006/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
4007 disregarded; acknowledge output is deasserted; all other signals are
4008 treated as usual; if 1 - normal activity. */
4009#define XCM_REG_TSEM_IFEN 0x20024
4010/* [RC 1] Set at message length mismatch (relative to last indication) at
4011 the tsem interface. */
4012#define XCM_REG_TSEM_LENGTH_MIS 0x20224
4013/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
4014 weight 8 (the most prioritised); 1 stands for weight 1 (least
4015 prioritised); 2 stands for weight 2; etc. */
4016#define XCM_REG_TSEM_WEIGHT 0x200c0
4017/* [RW 2] The queue index for registration on UNA greater NXT decision rule. */
4018#define XCM_REG_UNA_GT_NXT_Q 0x20120
4019/* [RW 1] Input usem Interface enable. If 0 - the valid input is
4020 disregarded; acknowledge output is deasserted; all other signals are
4021 treated as usual; if 1 - normal activity. */
4022#define XCM_REG_USEM_IFEN 0x2002c
4023/* [RC 1] Message length mismatch (relative to last indication) at the usem
4024 interface. */
4025#define XCM_REG_USEM_LENGTH_MIS 0x2022c
4026/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
4027 weight 8 (the most prioritised); 1 stands for weight 1 (least
4028 prioritised); 2 stands for weight 2; etc. */
4029#define XCM_REG_USEM_WEIGHT 0x200c8
4030#define XCM_REG_WU_DA_CNT_CMD00 0x201d4
4031#define XCM_REG_WU_DA_CNT_CMD01 0x201d8
4032#define XCM_REG_WU_DA_CNT_CMD10 0x201dc
4033#define XCM_REG_WU_DA_CNT_CMD11 0x201e0
4034#define XCM_REG_WU_DA_CNT_UPD_VAL00 0x201e4
4035#define XCM_REG_WU_DA_CNT_UPD_VAL01 0x201e8
4036#define XCM_REG_WU_DA_CNT_UPD_VAL10 0x201ec
4037#define XCM_REG_WU_DA_CNT_UPD_VAL11 0x201f0
4038#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00 0x201c4
4039#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01 0x201c8
4040#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10 0x201cc
4041#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11 0x201d0
4042/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
4043 acknowledge output is deasserted; all other signals are treated as usual;
4044 if 1 - normal activity. */
4045#define XCM_REG_XCM_CFC_IFEN 0x20050
4046/* [RW 14] Interrupt mask register #0 read/write */
4047#define XCM_REG_XCM_INT_MASK 0x202b4
4048/* [R 14] Interrupt register #0 read */
4049#define XCM_REG_XCM_INT_STS 0x202a8
4050/* [R 30] Parity register #0 read */
4051#define XCM_REG_XCM_PRTY_STS 0x202b8
4052/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS
4053 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
4054 Is used to determine the number of the AG context REG-pairs written back;
4055 when the Reg1WbFlg isn't set. */
4056#define XCM_REG_XCM_REG0_SZ 0x200f4
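
In other words, the register holds the REG-pair count of AG context region 0 minus one. A minimal sketch, assuming the driver's usual REG_WR(bp, offset, val) write helper and driver headers:

    /* Program the AG context region 0 size: the register takes the index of
     * the most-significant REG-pair, i.e. (number of REG-pairs - 1). */
    static void xcm_set_reg0_size(struct bnx2x *bp, u32 num_reg_pairs)
    {
            REG_WR(bp, XCM_REG_XCM_REG0_SZ, num_reg_pairs - 1);
    }
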
4057/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
4058 disregarded; valid is deasserted; all other signals are treated as usual;
4059 if 1 - normal activity. */
4060#define XCM_REG_XCM_STORM0_IFEN 0x20004
4061/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
4062 disregarded; valid is deasserted; all other signals are treated as usual;
4063 if 1 - normal activity. */
4064#define XCM_REG_XCM_STORM1_IFEN 0x20008
4065/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
4066 disregarded; acknowledge output is deasserted; all other signals are
4067 treated as usual; if 1 - normal activity. */
4068#define XCM_REG_XCM_TM_IFEN 0x20020
4069/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
4070 disregarded; valid is deasserted; all other signals are treated as usual;
4071 if 1 - normal activity. */
4072#define XCM_REG_XCM_XQM_IFEN 0x2000c
4073/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */
4074#define XCM_REG_XCM_XQM_USE_Q 0x200f0
4075/* [RW 4] The value by which CFC updates the activity counter at QM bypass. */
4076#define XCM_REG_XQM_BYP_ACT_UPD 0x200fc
4077/* [RW 6] QM output initial credit. Max credit available - 32. A write sets
4078 the initial credit value; a read returns the current value of the credit
4079 counter. Must be initialized to 32 at start-up. */
4080#define XCM_REG_XQM_INIT_CRD 0x20420
4081/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
4082 stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
4083 prioritised); 2 stands for weight 2; etc. */
4084#define XCM_REG_XQM_P_WEIGHT 0x200e4
4085/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
4086 stands for weight 8 (the most prioritised); 1 stands for weight 1(least
4087 prioritised); 2 stands for weight 2; tc. */
4088#define XCM_REG_XQM_S_WEIGHT 0x200e8
4089/* [RW 28] The CM header value for QM request (primary). */
4090#define XCM_REG_XQM_XCM_HDR_P 0x200a0
4091/* [RW 28] The CM header value for QM request (secondary). */
4092#define XCM_REG_XQM_XCM_HDR_S 0x200a4
4093/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
4094 acknowledge output is deasserted; all other signals are treated as usual;
4095 if 1 - normal activity. */
4096#define XCM_REG_XQM_XCM_IFEN 0x20014
4097/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
4098 acknowledge output is deasserted; all other signals are treated as usual;
4099 if 1 - normal activity. */
4100#define XCM_REG_XSDM_IFEN 0x20018
4101/* [RC 1] Set at message length mismatch (relative to last indication) at
4102 the SDM interface. */
4103#define XCM_REG_XSDM_LENGTH_MIS 0x20220
4104/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
4105 weight 8 (the most prioritised); 1 stands for weight 1 (least
4106 prioritised); 2 stands for weight 2; etc. */
4107#define XCM_REG_XSDM_WEIGHT 0x200e0
4108/* [RW 17] Indirect access to the descriptor table of the XX protection
4109 mechanism. The fields are: [5:0] - message length; [11:6] - message
4110 pointer; [16:12] - next pointer. */
4111#define XCM_REG_XX_DESCR_TABLE 0x20480
4112#define XCM_REG_XX_DESCR_TABLE_SIZE 32
4113/* [R 6] Used to read the XX protection Free counter. */
4114#define XCM_REG_XX_FREE 0x20240
4115/* [RW 6] Initial value for the credit counter; responsible for filling
4116 the Input Stage XX protection buffer with the XX protection pending
4117 messages. Max credit available - 3. A write sets the initial credit value;
4118 a read returns the current value of the credit counter. Must be initialized
4119 to 2 at start-up. */
4120#define XCM_REG_XX_INIT_CRD 0x20424
4121/* [RW 6] The maximum number of pending messages which may be stored in XX
4122 protection. A read returns ~xcm_registers_xx_free.xx_free. */
4123#define XCM_REG_XX_MSG_NUM 0x20428
4124/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
4125#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
4126/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
4127 The fields are: [4:0] - tail pointer; [9:5] - Link List size; [14:10] -
4128 header pointer. */
4129#define XCM_REG_XX_TABLE 0x20500
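
Both indirect XX protection tables pack several small fields into one word; a decode sketch may make the layouts easier to read. The bit positions are taken verbatim from the two comments above; the struct and function names are illustrative only:

    /* Unpack one XCM_REG_XX_DESCR_TABLE entry: [5:0] message length,
     * [11:6] message pointer, [16:12] next pointer. */
    struct xcm_xx_descr {
            u8 msg_len;     /* bits [5:0]   */
            u8 msg_ptr;     /* bits [11:6]  */
            u8 next_ptr;    /* bits [16:12] */
    };

    static void xcm_decode_xx_descr(u32 entry, struct xcm_xx_descr *d)
    {
            d->msg_len  = entry & 0x3f;
            d->msg_ptr  = (entry >> 6) & 0x3f;
            d->next_ptr = (entry >> 12) & 0x1f;
    }

    /* XCM_REG_XX_TABLE entries ([4:0] tail pointer, [9:5] link list size,
     * [14:10] header pointer) unpack the same way, with 0x1f masks and
     * shifts of 5 and 10. */
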
4130/* [RW 8] The event id for aggregated interrupt 0 */
4131#define XSDM_REG_AGG_INT_EVENT_0 0x166038
4132#define XSDM_REG_AGG_INT_EVENT_1 0x16603c
4133#define XSDM_REG_AGG_INT_EVENT_10 0x166060
4134#define XSDM_REG_AGG_INT_EVENT_11 0x166064
4135#define XSDM_REG_AGG_INT_EVENT_12 0x166068
4136#define XSDM_REG_AGG_INT_EVENT_13 0x16606c
4137#define XSDM_REG_AGG_INT_EVENT_14 0x166070
4138#define XSDM_REG_AGG_INT_EVENT_2 0x166040
4139#define XSDM_REG_AGG_INT_EVENT_3 0x166044
4140#define XSDM_REG_AGG_INT_EVENT_4 0x166048
4141#define XSDM_REG_AGG_INT_EVENT_5 0x16604c
4142#define XSDM_REG_AGG_INT_EVENT_6 0x166050
4143#define XSDM_REG_AGG_INT_EVENT_7 0x166054
4144#define XSDM_REG_AGG_INT_EVENT_8 0x166058
4145#define XSDM_REG_AGG_INT_EVENT_9 0x16605c
4146/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
4147 or auto-mask-mode (1) */
4148#define XSDM_REG_AGG_INT_MODE_0 0x1661b8
4149#define XSDM_REG_AGG_INT_MODE_1 0x1661bc
4150/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
4151#define XSDM_REG_CFC_RSP_START_ADDR 0x166008
4152/* [RW 16] The maximum value of the completion counter #0 */
4153#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c
4154/* [RW 16] The maximum value of the completion counter #1 */
4155#define XSDM_REG_CMP_COUNTER_MAX1 0x166020
4156/* [RW 16] The maximum value of the completion counter #2 */
4157#define XSDM_REG_CMP_COUNTER_MAX2 0x166024
4158/* [RW 16] The maximum value of the completion counter #3 */
4159#define XSDM_REG_CMP_COUNTER_MAX3 0x166028
4160/* [RW 13] The start address in the internal RAM for the completion
4161 counters. */
4162#define XSDM_REG_CMP_COUNTER_START_ADDR 0x16600c
4163#define XSDM_REG_ENABLE_IN1 0x166238
4164#define XSDM_REG_ENABLE_IN2 0x16623c
4165#define XSDM_REG_ENABLE_OUT1 0x166240
4166#define XSDM_REG_ENABLE_OUT2 0x166244
4167/* [RW 4] The initial number of messages that can be sent to the pxp control
4168 interface without receiving any ACK. */
4169#define XSDM_REG_INIT_CREDIT_PXP_CTRL 0x1664bc
4170/* [ST 32] The number of ACK after placement messages received */
4171#define XSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x16627c
4172/* [ST 32] The number of packet end messages received from the parser */
4173#define XSDM_REG_NUM_OF_PKT_END_MSG 0x166274
4174/* [ST 32] The number of requests received from the pxp async if */
4175#define XSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x166278
4176/* [ST 32] The number of commands received in queue 0 */
4177#define XSDM_REG_NUM_OF_Q0_CMD 0x166248
4178/* [ST 32] The number of commands received in queue 10 */
4179#define XSDM_REG_NUM_OF_Q10_CMD 0x16626c
4180/* [ST 32] The number of commands received in queue 11 */
4181#define XSDM_REG_NUM_OF_Q11_CMD 0x166270
4182/* [ST 32] The number of commands received in queue 1 */
4183#define XSDM_REG_NUM_OF_Q1_CMD 0x16624c
4184/* [ST 32] The number of commands received in queue 3 */
4185#define XSDM_REG_NUM_OF_Q3_CMD 0x166250
4186/* [ST 32] The number of commands received in queue 4 */
4187#define XSDM_REG_NUM_OF_Q4_CMD 0x166254
4188/* [ST 32] The number of commands received in queue 5 */
4189#define XSDM_REG_NUM_OF_Q5_CMD 0x166258
4190/* [ST 32] The number of commands received in queue 6 */
4191#define XSDM_REG_NUM_OF_Q6_CMD 0x16625c
4192/* [ST 32] The number of commands received in queue 7 */
4193#define XSDM_REG_NUM_OF_Q7_CMD 0x166260
4194/* [ST 32] The number of commands received in queue 8 */
4195#define XSDM_REG_NUM_OF_Q8_CMD 0x166264
4196/* [ST 32] The number of commands received in queue 9 */
4197#define XSDM_REG_NUM_OF_Q9_CMD 0x166268
4198/* [RW 13] The start address in the internal RAM for queue counters */
4199#define XSDM_REG_Q_COUNTER_START_ADDR 0x166010
4200/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
4201#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548
4202/* [R 1] parser fifo empty in sdm_sync block */
4203#define XSDM_REG_SYNC_PARSER_EMPTY 0x166550
4204/* [R 1] parser serial fifo empty in sdm_sync block */
4205#define XSDM_REG_SYNC_SYNC_EMPTY 0x166558
4206/* [RW 32] Tick for timer counter. Applicable only when
4207 ~xsdm_registers_timer_tick_enable.timer_tick_enable =1 */
4208#define XSDM_REG_TIMER_TICK 0x166000
4209/* [RW 32] Interrupt mask register #0 read/write */
4210#define XSDM_REG_XSDM_INT_MASK_0 0x16629c
4211#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac
4212/* [R 32] Interrupt register #0 read */
4213#define XSDM_REG_XSDM_INT_STS_0 0x166290
4214#define XSDM_REG_XSDM_INT_STS_1 0x1662a0
4215/* [RW 11] Parity mask register #0 read/write */
4216#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
4217/* [R 11] Parity register #0 read */
4218#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
4219/* [RW 5] The number of time_slots in the arbitration cycle */
4220#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
4221/* [RW 3] The source that is associated with arbitration element 0. Source
4222 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4223 sleeping thread with priority 1; 4- sleeping thread with priority 2 */
4224#define XSEM_REG_ARB_ELEMENT0 0x280020
4225/* [RW 3] The source that is associated with arbitration element 1. Source
4226 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4227 sleeping thread with priority 1; 4- sleeping thread with priority 2.
4228 Could not be equal to register ~xsem_registers_arb_element0.arb_element0 */
4229#define XSEM_REG_ARB_ELEMENT1 0x280024
4230/* [RW 3] The source that is associated with arbitration element 2. Source
4231 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4232 sleeping thread with priority 1; 4- sleeping thread with priority 2.
4233 Could not be equal to register ~xsem_registers_arb_element0.arb_element0
4234 and ~xsem_registers_arb_element1.arb_element1 */
4235#define XSEM_REG_ARB_ELEMENT2 0x280028
4236/* [RW 3] The source that is associated with arbitration element 3. Source
4237 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4238 sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
4239 not be equal to register ~xsem_registers_arb_element0.arb_element0 and
4240 ~xsem_registers_arb_element1.arb_element1 and
4241 ~xsem_registers_arb_element2.arb_element2 */
4242#define XSEM_REG_ARB_ELEMENT3 0x28002c
4243/* [RW 3] The source that is associated with arbitration element 4. Source
4244 decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
4245 sleeping thread with priority 1; 4- sleeping thread with priority 2.
4246 Could not be equal to register ~xsem_registers_arb_element0.arb_element0
4247 and ~xsem_registers_arb_element1.arb_element1 and
4248 ~xsem_registers_arb_element2.arb_element2 and
4249 ~xsem_registers_arb_element3.arb_element3 */
4250#define XSEM_REG_ARB_ELEMENT4 0x280030
4251#define XSEM_REG_ENABLE_IN 0x2800a4
4252#define XSEM_REG_ENABLE_OUT 0x2800a8
4253/* [RW 32] This address space contains all registers and memories that are
4254 placed in SEM_FAST block. The SEM_FAST registers are described in
4255 appendix B. In order to access the sem_fast registers the base address
4256 ~fast_memory.fast_memory should be added to each sem_fast register offset. */
4257#define XSEM_REG_FAST_MEMORY 0x2a0000
4258/* [RW 1] Disables input messages from FIC0. May be updated during run_time
4259 by the microcode. */
4260#define XSEM_REG_FIC0_DISABLE 0x280224
4261/* [RW 1] Disables input messages from FIC1. May be updated during run_time
4262 by the microcode. */
4263#define XSEM_REG_FIC1_DISABLE 0x280234
4264/* [RW 15] Interrupt table. Read and write access to it is not possible in
4265 the middle of the work. */
4266#define XSEM_REG_INT_TABLE 0x280400
4267/* [ST 24] Statistics register. The number of messages that entered through
4268 FIC0 */
4269#define XSEM_REG_MSG_NUM_FIC0 0x280000
4270/* [ST 24] Statistics register. The number of messages that entered through
4271 FIC1 */
4272#define XSEM_REG_MSG_NUM_FIC1 0x280004
4273/* [ST 24] Statistics register. The number of messages that were sent to
4274 FOC0 */
4275#define XSEM_REG_MSG_NUM_FOC0 0x280008
4276/* [ST 24] Statistics register. The number of messages that were sent to
4277 FOC1 */
4278#define XSEM_REG_MSG_NUM_FOC1 0x28000c
4279/* [ST 24] Statistics register. The number of messages that were sent to
4280 FOC2 */
4281#define XSEM_REG_MSG_NUM_FOC2 0x280010
4282/* [ST 24] Statistics register. The number of messages that were sent to
4283 FOC3 */
4284#define XSEM_REG_MSG_NUM_FOC3 0x280014
4285/* [RW 1] Disables input messages from the passive buffer. May be updated
4286 during run_time by the microcode. */
4287#define XSEM_REG_PAS_DISABLE 0x28024c
4288/* [WB 128] Debug only. Passive buffer memory */
4289#define XSEM_REG_PASSIVE_BUFFER 0x282000
4290/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
4291#define XSEM_REG_PRAM 0x2c0000
4292/* [R 16] Valid sleeping threads indication; there is a bit per thread */
4293#define XSEM_REG_SLEEP_THREADS_VALID 0x28026c
4294/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
4295#define XSEM_REG_SLOW_EXT_STORE_EMPTY 0x2802a0
4296/* [RW 16] List of free threads. There is a bit per thread. */
4297#define XSEM_REG_THREADS_LIST 0x2802e4
4298/* [RW 3] The arbitration scheme of time_slot 0 */
4299#define XSEM_REG_TS_0_AS 0x280038
4300/* [RW 3] The arbitration scheme of time_slot 10 */
4301#define XSEM_REG_TS_10_AS 0x280060
4302/* [RW 3] The arbitration scheme of time_slot 11 */
4303#define XSEM_REG_TS_11_AS 0x280064
4304/* [RW 3] The arbitration scheme of time_slot 12 */
4305#define XSEM_REG_TS_12_AS 0x280068
4306/* [RW 3] The arbitration scheme of time_slot 13 */
4307#define XSEM_REG_TS_13_AS 0x28006c
4308/* [RW 3] The arbitration scheme of time_slot 14 */
4309#define XSEM_REG_TS_14_AS 0x280070
4310/* [RW 3] The arbitration scheme of time_slot 15 */
4311#define XSEM_REG_TS_15_AS 0x280074
4312/* [RW 3] The arbitration scheme of time_slot 16 */
4313#define XSEM_REG_TS_16_AS 0x280078
4314/* [RW 3] The arbitration scheme of time_slot 17 */
4315#define XSEM_REG_TS_17_AS 0x28007c
4316/* [RW 3] The arbitration scheme of time_slot 18 */
4317#define XSEM_REG_TS_18_AS 0x280080
4318/* [RW 3] The arbitration scheme of time_slot 1 */
4319#define XSEM_REG_TS_1_AS 0x28003c
4320/* [RW 3] The arbitration scheme of time_slot 2 */
4321#define XSEM_REG_TS_2_AS 0x280040
4322/* [RW 3] The arbitration scheme of time_slot 3 */
4323#define XSEM_REG_TS_3_AS 0x280044
4324/* [RW 3] The arbitration scheme of time_slot 4 */
4325#define XSEM_REG_TS_4_AS 0x280048
4326/* [RW 3] The arbitration scheme of time_slot 5 */
4327#define XSEM_REG_TS_5_AS 0x28004c
4328/* [RW 3] The arbitration scheme of time_slot 6 */
4329#define XSEM_REG_TS_6_AS 0x280050
4330/* [RW 3] The arbitration scheme of time_slot 7 */
4331#define XSEM_REG_TS_7_AS 0x280054
4332/* [RW 3] The arbitration scheme of time_slot 8 */
4333#define XSEM_REG_TS_8_AS 0x280058
4334/* [RW 3] The arbitration scheme of time_slot 9 */
4335#define XSEM_REG_TS_9_AS 0x28005c
4336/* [RW 32] Interrupt mask register #0 read/write */
4337#define XSEM_REG_XSEM_INT_MASK_0 0x280110
4338#define XSEM_REG_XSEM_INT_MASK_1 0x280120
4339/* [R 32] Interrupt register #0 read */
4340#define XSEM_REG_XSEM_INT_STS_0 0x280104
4341#define XSEM_REG_XSEM_INT_STS_1 0x280114
4342/* [RW 32] Parity mask register #0 read/write */
4343#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130
4344#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140
4345/* [R 32] Parity register #0 read */
4346#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
4347#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
4348#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
4349#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
4350#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
4351#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0)
4352#define MCPR_NVM_COMMAND_DOIT (1L<<4)
4353#define MCPR_NVM_COMMAND_DONE (1L<<3)
4354#define MCPR_NVM_COMMAND_FIRST (1L<<7)
4355#define MCPR_NVM_COMMAND_LAST (1L<<8)
4356#define MCPR_NVM_COMMAND_WR (1L<<5)
4357#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9)
4358#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5)
4359#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1)
4360#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3)
4361#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
4362#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3)
4363#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3)
4364#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3)
4365#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3)
4366#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3)
4367#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3)
4368#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3)
4369#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3)
4370#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3)
4371#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
4372#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
4373#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
4374#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
4375#define EMAC_LED_100MB_OVERRIDE (1L<<2)
4376#define EMAC_LED_10MB_OVERRIDE (1L<<3)
4377#define EMAC_LED_2500MB_OVERRIDE (1L<<12)
4378#define EMAC_LED_OVERRIDE (1L<<0)
4379#define EMAC_LED_TRAFFIC (1L<<6)
4380#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26)
4381#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26)
4382#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26)
4383#define EMAC_MDIO_COMM_DATA (0xffffL<<0)
4384#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
4385#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
4386#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
4387#define EMAC_MDIO_MODE_CLOCK_CNT (0x3fL<<16)
4388#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
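
The EMAC_MDIO_COMM fields above are combined into a single command word per MDIO transaction. A rough clause-45 write sketch follows, assuming the driver's REG_WR()/REG_RD() helpers, udelay(), and the conventional placement of the PHY address in bits 25:21 and the device address in bits 20:16; treat those positions and the poll count as assumptions for illustration:

    /* Rough clause-45 MDIO write: an ADDRESS cycle followed by a WRITE_45
     * cycle, polling START_BUSY after each (sketch, not the driver routine). */
    static int cl45_mdio_write(struct bnx2x *bp, u32 emac_base,
                               u8 phy_addr, u8 devad, u16 reg, u16 val)
    {
            u32 tmp;
            int i;

            /* Address cycle: latch the register address within the device. */
            tmp = (phy_addr << 21) | (devad << 16) | reg |
                  EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY;
            REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, tmp);

            for (i = 0; i < 50; i++) {
                    udelay(10);
                    tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                    if (!(tmp & EMAC_MDIO_COMM_START_BUSY))
                            break;
            }
            if (tmp & EMAC_MDIO_COMM_START_BUSY)
                    return -EBUSY;

            /* Data cycle: write the 16-bit value. */
            tmp = (phy_addr << 21) | (devad << 16) | val |
                  EMAC_MDIO_COMM_COMMAND_WRITE_45 | EMAC_MDIO_COMM_START_BUSY;
            REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, tmp);

            for (i = 0; i < 50; i++) {
                    udelay(10);
                    tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                    if (!(tmp & EMAC_MDIO_COMM_START_BUSY))
                            return 0;
            }
            return -EBUSY;
    }
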
4389#define EMAC_MODE_25G_MODE (1L<<5)
4390#define EMAC_MODE_HALF_DUPLEX (1L<<1)
4391#define EMAC_MODE_PORT_GMII (2L<<2)
4392#define EMAC_MODE_PORT_MII (1L<<2)
4393#define EMAC_MODE_PORT_MII_10M (3L<<2)
4394#define EMAC_MODE_RESET (1L<<0)
4395#define EMAC_REG_EMAC_LED 0xc
4396#define EMAC_REG_EMAC_MAC_MATCH 0x10
4397#define EMAC_REG_EMAC_MDIO_COMM 0xac
4398#define EMAC_REG_EMAC_MDIO_MODE 0xb4
4399#define EMAC_REG_EMAC_MODE 0x0
4400#define EMAC_REG_EMAC_RX_MODE 0xc8
4401#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c
4402#define EMAC_REG_EMAC_RX_STAT_AC 0x180
4403#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4
4404#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23
4405#define EMAC_REG_EMAC_TX_MODE 0xbc
4406#define EMAC_REG_EMAC_TX_STAT_AC 0x280
4407#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
4408#define EMAC_RX_MODE_FLOW_EN (1L<<2)
4409#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
4410#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
4411#define EMAC_RX_MODE_RESET (1L<<0)
4412#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
4413#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
4414#define EMAC_TX_MODE_FLOW_EN (1L<<4)
4415#define EMAC_TX_MODE_RESET (1L<<0)
4416#define MISC_REGISTERS_GPIO_0 0
4417#define MISC_REGISTERS_GPIO_1 1
4418#define MISC_REGISTERS_GPIO_2 2
4419#define MISC_REGISTERS_GPIO_3 3
4420#define MISC_REGISTERS_GPIO_CLR_POS 16
4421#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24)
4422#define MISC_REGISTERS_GPIO_FLOAT_POS 24
4423#define MISC_REGISTERS_GPIO_HIGH 1
4424#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2
4425#define MISC_REGISTERS_GPIO_INT_CLR_POS 24
4426#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0
4427#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1
4428#define MISC_REGISTERS_GPIO_INT_SET_POS 16
4429#define MISC_REGISTERS_GPIO_LOW 0
4430#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1
4431#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0
4432#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
4433#define MISC_REGISTERS_GPIO_SET_POS 8
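
The GPIO *_POS values above are bit offsets into the MISC GPIO register (MISC_REG_GPIO, defined earlier in this header): writing a 1 into the SET, CLR or FLOAT field at a pin's position drives the pin high, drives it low, or tri-states it. A simplified sketch of driving a pin, assuming REG_RD()/REG_WR(); the real driver additionally takes the HW_LOCK_RESOURCE_GPIO hardware lock and accounts for the per-port shift (MISC_REGISTERS_GPIO_PORT_SHIFT):

    /* Drive one GPIO pin high or low: un-float it, then write the SET or
     * CLR bit at the pin's position (simplified sketch). */
    static void misc_gpio_output(struct bnx2x *bp, u8 gpio_num, int high)
    {
            u32 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
            u32 mask = 1 << gpio_num;

            gpio_reg &= ~(mask << MISC_REGISTERS_GPIO_FLOAT_POS);
            if (high)
                    gpio_reg |= (mask << MISC_REGISTERS_GPIO_SET_POS);
            else
                    gpio_reg |= (mask << MISC_REGISTERS_GPIO_CLR_POS);
            REG_WR(bp, MISC_REG_GPIO, gpio_reg);
    }
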
4434#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
4435#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
4436#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
4437#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
4438#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
4439#define MISC_REGISTERS_RESET_REG_1_SET 0x584
4440#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
4441#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
4442#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
4443#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
4444#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
4445#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
4446#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
4447#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
4448#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
4449#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
4450#define MISC_REGISTERS_RESET_REG_2_SET 0x594
4451#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
4452#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
4453#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2)
4454#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3)
4455#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0)
4456#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5)
4457#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6)
4458#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7)
4459#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4)
4460#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8)
4461#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4
4462#define MISC_REGISTERS_SPIO_4 4
4463#define MISC_REGISTERS_SPIO_5 5
4464#define MISC_REGISTERS_SPIO_7 7
4465#define MISC_REGISTERS_SPIO_CLR_POS 16
4466#define MISC_REGISTERS_SPIO_FLOAT (0xffL<<24)
4467#define MISC_REGISTERS_SPIO_FLOAT_POS 24
4468#define MISC_REGISTERS_SPIO_INPUT_HI_Z 2
4469#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS 16
4470#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
4471#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
4472#define MISC_REGISTERS_SPIO_SET_POS 8
4473#define HW_LOCK_MAX_RESOURCE_VALUE 31
4474#define HW_LOCK_RESOURCE_GPIO 1
4475#define HW_LOCK_RESOURCE_MDIO 0
4476#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
4477#define HW_LOCK_RESOURCE_RESERVED_08 8
4478#define HW_LOCK_RESOURCE_SPIO 2
4479#define HW_LOCK_RESOURCE_UNDI 5
4480#define PRS_FLAG_OVERETH_IPV4 1
4481#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
4482#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31)
4483#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9)
4484#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (1<<8)
4485#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (1<<7)
4486#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (1<<6)
4487#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (1<<29)
4488#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (1<<28)
4489#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (1<<1)
4490#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (1<<0)
4491#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (1<<18)
4492#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (1<<11)
4493#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (1<<13)
4494#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (1<<12)
4495#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5)
4496#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9)
4497#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12)
4498#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (1<<28)
4499#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (1<<31)
4500#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (1<<29)
4501#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (1<<30)
4502#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15)
4503#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14)
4504#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20)
4505#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (1<<0)
4506#define AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT (1<<31)
4507#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (1<<3)
4508#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (1<<2)
4509#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (1<<5)
4510#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (1<<4)
4511#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (1<<3)
4512#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (1<<2)
4513#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (1<<22)
4514#define AEU_INPUTS_ATTN_BITS_SPIO5 (1<<15)
4515#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (1<<27)
4516#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (1<<5)
4517#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (1<<25)
4518#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (1<<24)
4519#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (1<<29)
4520#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (1<<28)
4521#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (1<<23)
4522#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (1<<27)
4523#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (1<<26)
4524#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (1<<21)
4525#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (1<<20)
4526#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (1<<25)
4527#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (1<<24)
4528#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (1<<16)
4529#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (1<<9)
4530#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (1<<7)
4531#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (1<<6)
4532#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (1<<11)
4533#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (1<<10)
4534#define RESERVED_GENERAL_ATTENTION_BIT_0 0
4535
4536#define EVEREST_GEN_ATTN_IN_USE_MASK 0x3ffe0
4537#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000
4538
4539#define RESERVED_GENERAL_ATTENTION_BIT_6 6
4540#define RESERVED_GENERAL_ATTENTION_BIT_7 7
4541#define RESERVED_GENERAL_ATTENTION_BIT_8 8
4542#define RESERVED_GENERAL_ATTENTION_BIT_9 9
4543#define RESERVED_GENERAL_ATTENTION_BIT_10 10
4544#define RESERVED_GENERAL_ATTENTION_BIT_11 11
4545#define RESERVED_GENERAL_ATTENTION_BIT_12 12
4546#define RESERVED_GENERAL_ATTENTION_BIT_13 13
4547#define RESERVED_GENERAL_ATTENTION_BIT_14 14
4548#define RESERVED_GENERAL_ATTENTION_BIT_15 15
4549#define RESERVED_GENERAL_ATTENTION_BIT_16 16
4550#define RESERVED_GENERAL_ATTENTION_BIT_17 17
4551#define RESERVED_GENERAL_ATTENTION_BIT_18 18
4552#define RESERVED_GENERAL_ATTENTION_BIT_19 19
4553#define RESERVED_GENERAL_ATTENTION_BIT_20 20
4554#define RESERVED_GENERAL_ATTENTION_BIT_21 21
4555
4556/* storm asserts attention bits */
4557#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7
4558#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8
4559#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9
4560#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10
4561
4562/* mcp error attention bit */
4563#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11
4564
4565/*E1H NIG status sync attention mapped to group 4-7*/
4566#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12
4567#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13
4568#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14
4569#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15
4570#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16
4571#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17
4572#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18
4573#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19
4574
4575
4576#define LATCHED_ATTN_RBCR 23
4577#define LATCHED_ATTN_RBCT 24
4578#define LATCHED_ATTN_RBCN 25
4579#define LATCHED_ATTN_RBCU 26
4580#define LATCHED_ATTN_RBCP 27
4581#define LATCHED_ATTN_TIMEOUT_GRC 28
4582#define LATCHED_ATTN_RSVD_GRC 29
4583#define LATCHED_ATTN_ROM_PARITY_MCP 30
4584#define LATCHED_ATTN_UM_RX_PARITY_MCP 31
4585#define LATCHED_ATTN_UM_TX_PARITY_MCP 32
4586#define LATCHED_ATTN_SCPAD_PARITY_MCP 33
4587
4588#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32)
4589#define GENERAL_ATTEN_OFFSET(atten_name)\
4590 (1UL << ((94 + atten_name) % 32))
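
The two macros translate a general attention number into the index of the 32-bit attention word and the mask within it, the general attention inputs starting at bit 94 of the overall attention vector. A worked example (illustrative wrapper only):

    /* Worked example: MCP_FATAL_ASSERT_ATTENTION_BIT is general attention 11,
     * so GENERAL_ATTEN_WORD() yields (94 + 11) / 32 = 3 and
     * GENERAL_ATTEN_OFFSET() yields 1UL << ((94 + 11) % 32) = 1UL << 9. */
    static bool mcp_fatal_asserted(u32 attn_word_3)
    {
            return attn_word_3 &
                   GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT);
    }
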
4591/*
4592 * This file defines the GRC base address for every block.
4593 * This file is included by chipsim, asm microcode and cpp microcode.
4594 * These values are used in Design.xml on the regBase attribute.
4595 * Use the base with the generated offsets of specific registers.
4596 */
4597
4598#define GRCBASE_PXPCS 0x000000
4599#define GRCBASE_PCICONFIG 0x002000
4600#define GRCBASE_PCIREG 0x002400
4601#define GRCBASE_EMAC0 0x008000
4602#define GRCBASE_EMAC1 0x008400
4603#define GRCBASE_DBU 0x008800
4604#define GRCBASE_MISC 0x00A000
4605#define GRCBASE_DBG 0x00C000
4606#define GRCBASE_NIG 0x010000
4607#define GRCBASE_XCM 0x020000
4608#define GRCBASE_PRS 0x040000
4609#define GRCBASE_SRCH 0x040400
4610#define GRCBASE_TSDM 0x042000
4611#define GRCBASE_TCM 0x050000
4612#define GRCBASE_BRB1 0x060000
4613#define GRCBASE_MCP 0x080000
4614#define GRCBASE_UPB 0x0C1000
4615#define GRCBASE_CSDM 0x0C2000
4616#define GRCBASE_USDM 0x0C4000
4617#define GRCBASE_CCM 0x0D0000
4618#define GRCBASE_UCM 0x0E0000
4619#define GRCBASE_CDU 0x101000
4620#define GRCBASE_DMAE 0x102000
4621#define GRCBASE_PXP 0x103000
4622#define GRCBASE_CFC 0x104000
4623#define GRCBASE_HC 0x108000
4624#define GRCBASE_PXP2 0x120000
4625#define GRCBASE_PBF 0x140000
4626#define GRCBASE_XPB 0x161000
4627#define GRCBASE_TIMERS 0x164000
4628#define GRCBASE_XSDM 0x166000
4629#define GRCBASE_QM 0x168000
4630#define GRCBASE_DQ 0x170000
4631#define GRCBASE_TSEM 0x180000
4632#define GRCBASE_CSEM 0x200000
4633#define GRCBASE_XSEM 0x280000
4634#define GRCBASE_USEM 0x300000
4635#define GRCBASE_MISC_AEU GRCBASE_MISC
4636
4637
4638/* offset of configuration space in the pci core register */
4639#define PCICFG_OFFSET 0x2000
4640#define PCICFG_VENDOR_ID_OFFSET 0x00
4641#define PCICFG_DEVICE_ID_OFFSET 0x02
4642#define PCICFG_COMMAND_OFFSET 0x04
4643#define PCICFG_COMMAND_IO_SPACE (1<<0)
4644#define PCICFG_COMMAND_MEM_SPACE (1<<1)
4645#define PCICFG_COMMAND_BUS_MASTER (1<<2)
4646#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
4647#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
4648#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
4649#define PCICFG_COMMAND_PERR_ENA (1<<6)
4650#define PCICFG_COMMAND_STEPPING (1<<7)
4651#define PCICFG_COMMAND_SERR_ENA (1<<8)
4652#define PCICFG_COMMAND_FAST_B2B (1<<9)
4653#define PCICFG_COMMAND_INT_DISABLE (1<<10)
4654#define PCICFG_COMMAND_RESERVED (0x1f<<11)
4655#define PCICFG_STATUS_OFFSET 0x06
4656#define PCICFG_REVESION_ID_OFFSET 0x08
4657#define PCICFG_CACHE_LINE_SIZE 0x0c
4658#define PCICFG_LATENCY_TIMER 0x0d
4659#define PCICFG_BAR_1_LOW 0x10
4660#define PCICFG_BAR_1_HIGH 0x14
4661#define PCICFG_BAR_2_LOW 0x18
4662#define PCICFG_BAR_2_HIGH 0x1c
4663#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
4664#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
4665#define PCICFG_INT_LINE 0x3c
4666#define PCICFG_INT_PIN 0x3d
4667#define PCICFG_PM_CAPABILITY 0x48
4668#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
4669#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
4670#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
4671#define PCICFG_PM_CAPABILITY_DSI (1<<21)
4672#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
4673#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
4674#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
4675#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
4676#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
4677#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
4678#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
4679#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
4680#define PCICFG_PM_CSR_OFFSET 0x4c
4681#define PCICFG_PM_CSR_STATE (0x3<<0)
4682#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
4683#define PCICFG_PM_CSR_PME_STATUS (1<<15)
4684#define PCICFG_MSI_CAP_ID_OFFSET 0x58
4685#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16)
4686#define PCICFG_MSI_CONTROL_MCAP (0x7<<17)
4687#define PCICFG_MSI_CONTROL_MENA (0x7<<20)
4688#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23)
4689#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24)
4690#define PCICFG_GRC_ADDRESS 0x78
4691#define PCICFG_GRC_DATA 0x80
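
PCICFG_GRC_ADDRESS and PCICFG_GRC_DATA form an indirect window into GRC space through PCI configuration cycles: software writes the target GRC offset to config offset 0x78 and then reads or writes the data at 0x80. A sketch of an indirect read, assuming the standard pci_write_config_dword()/pci_read_config_dword() kernel helpers; parking the window on the vendor-ID offset afterwards mirrors common practice and is an assumption here:

    /* Indirect GRC register read through PCI configuration space (sketch). */
    static u32 grc_reg_rd_ind(struct pci_dev *pdev, u32 grc_addr)
    {
            u32 val;

            pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS, grc_addr);
            pci_read_config_dword(pdev, PCICFG_GRC_DATA, &val);
            /* Park the window on a harmless offset afterwards. */
            pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS,
                                   PCICFG_VENDOR_ID_OFFSET);
            return val;
    }
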
4692#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0
4693#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16)
4694#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27)
4695#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30)
4696#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31)
4697
4698#define PCICFG_DEVICE_CONTROL 0xb4
4699#define PCICFG_DEVICE_STATUS 0xb6
4700#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0)
4701#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1)
4702#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2)
4703#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3)
4704#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4)
4705#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5)
4706#define PCICFG_LINK_CONTROL 0xbc
4707
4708
4709#define BAR_USTRORM_INTMEM 0x400000
4710#define BAR_CSTRORM_INTMEM 0x410000
4711#define BAR_XSTRORM_INTMEM 0x420000
4712#define BAR_TSTRORM_INTMEM 0x430000
4713
4714/* for accessing the IGU in case of status block ACK */
4715#define BAR_IGU_INTMEM 0x440000
4716
4717#define BAR_DOORBELL_OFFSET 0x800000
4718
4719#define BAR_ME_REGISTER 0x450000
4720
4721/* config_2 offset */
4722#define GRC_CONFIG_2_SIZE_REG 0x408
4723#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
4724#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
4725#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
4726#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
4727#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0)
4728#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0)
4729#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0)
4730#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0)
4731#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0)
4732#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0)
4733#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0)
4734#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0)
4735#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0)
4736#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0)
4737#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
4738#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
4739#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
4740#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
4741#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
4742#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
4743#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
4744#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
4745#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
4746#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
4747#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
4748#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8)
4749#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8)
4750#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8)
4751#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8)
4752#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8)
4753#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8)
4754#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8)
4755#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8)
4756#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8)
4757#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8)
4758#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
4759#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
4760#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
4761#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
4762#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
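
The 4-bit BAR1 size field above is a log-style encoding: 0 disables the BAR, and each successive value doubles the window starting at 64 KB, so 15 selects 1 GB. A tiny decode sketch (helper name is illustrative):

    /* Decode a PCI_CONFIG_2_BAR1_SIZE field (or the matching BAR2 field
     * defined below): 0 = disabled, 1 = 64K, 2 = 128K, ..., 15 = 1G. */
    static u64 grc_bar_size_bytes(u32 size_field)
    {
            return size_field ? (0x10000ULL << (size_field - 1)) : 0;
    }
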
4763
4764/* config_3 offset */
4765#define GRC_CONFIG_3_SIZE_REG 0x40c
4766#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
4767#define PCI_CONFIG_3_FORCE_PME (1L<<24)
4768#define PCI_CONFIG_3_PME_STATUS (1L<<25)
4769#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
4770#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
4771#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
4772#define PCI_CONFIG_3_PCI_POWER (1L<<31)
4773
4774#define GRC_BAR2_CONFIG 0x4e0
4775#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
4776#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
4777#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
4778#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
4779#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
4780#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
4781#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
4782#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
4783#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
4784#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
4785#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
4786#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
4787#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
4788#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
4789#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
4790#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
4791#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
4792#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
4793
4794#define PCI_PM_DATA_A 0x410
4795#define PCI_PM_DATA_B 0x414
4796#define PCI_ID_VAL1 0x434
4797#define PCI_ID_VAL2 0x438
4798
4799
4800#define MDIO_REG_BANK_CL73_IEEEB0 0x0
4801#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
4802#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
4803#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
4804#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
4805
4806#define MDIO_REG_BANK_CL73_IEEEB1 0x10
4807#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
4808#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
4809#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
4810#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
4811#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
4812#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
4813#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
4814#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
4815#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
4816#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
4817#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
4818#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
4819#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
4820#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
4821#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
4822
4823#define MDIO_REG_BANK_RX0 0x80b0
4824#define MDIO_RX0_RX_STATUS 0x10
4825#define MDIO_RX0_RX_STATUS_SIGDET 0x8000
4826#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000
4827#define MDIO_RX0_RX_EQ_BOOST 0x1c
4828#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
4829#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10
4830
4831#define MDIO_REG_BANK_RX1 0x80c0
4832#define MDIO_RX1_RX_EQ_BOOST 0x1c
4833#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
4834#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10
4835
4836#define MDIO_REG_BANK_RX2 0x80d0
4837#define MDIO_RX2_RX_EQ_BOOST 0x1c
4838#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
4839#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10
4840
4841#define MDIO_REG_BANK_RX3 0x80e0
4842#define MDIO_RX3_RX_EQ_BOOST 0x1c
4843#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
4844#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10
4845
4846#define MDIO_REG_BANK_RX_ALL 0x80f0
4847#define MDIO_RX_ALL_RX_EQ_BOOST 0x1c
4848#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
4849#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
4850
4851#define MDIO_REG_BANK_TX0 0x8060
4852#define MDIO_TX0_TX_DRIVER 0x17
4853#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
4854#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
4855#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
4856#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
4857#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
4858#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
4859#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
4860#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
4861#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
4862
4863#define MDIO_REG_BANK_TX1 0x8070
4864#define MDIO_TX1_TX_DRIVER 0x17
4865#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
4866#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
4867#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
4868#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
4869#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
4870#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
4871#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
4872#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
4873#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
4874
4875#define MDIO_REG_BANK_TX2 0x8080
4876#define MDIO_TX2_TX_DRIVER 0x17
4877#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
4878#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
4879#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
4880#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
4881#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
4882#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
4883#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
4884#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
4885#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
4886
4887#define MDIO_REG_BANK_TX3 0x8090
4888#define MDIO_TX3_TX_DRIVER 0x17
4889#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
4890#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
4891#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
4892#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
4893#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
4894#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
4895#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
4896#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
4897#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
4898
4899#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000
4900#define MDIO_BLOCK0_XGXS_CONTROL 0x10
4901
4902#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010
4903#define MDIO_BLOCK1_LANE_CTRL0 0x15
4904#define MDIO_BLOCK1_LANE_CTRL1 0x16
4905#define MDIO_BLOCK1_LANE_CTRL2 0x17
4906#define MDIO_BLOCK1_LANE_PRBS 0x19
4907
4908#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100
4909#define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10
4910#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000
4911#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
4912#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
4913#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
4914#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
4915#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
4916#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
4917#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
4918
4919#define MDIO_REG_BANK_GP_STATUS 0x8120
4920#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B
4921#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
4922#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
4923#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
4924#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
4925#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
4926#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
4927#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
4928#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
4929#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
4930#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
4931#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
4932#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
4933#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
4934#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
4935#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
4936#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
4937#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
4938#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
4939#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
4940#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
4941#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
4942#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
4943#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
4944#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
4945
4946
4947#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
4948#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
4949#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000
4950#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
4951#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
4952#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
4953#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1)
4954
4955#define MDIO_REG_BANK_SERDES_DIGITAL 0x8300
4956#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10
4957#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001
4958#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002
4959#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004
4960#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008
4961#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010
4962#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020
4963#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11
4964#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
4965#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
4966#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
4967#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
4968#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
4969#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
4970#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018
4971#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
4972#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
4973#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
4974#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15
4975#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002
4976#define MDIO_SERDES_DIGITAL_MISC1 0x18
4977#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
4978#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
4979#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000
4980#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000
4981#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000
4982#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000
4983#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010
4984#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f
4985#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000
4986#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001
4987#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002
4988#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003
4989#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004
4990#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005
4991#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006
4992#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007
4993#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008
4994#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009
4995
4996#define MDIO_REG_BANK_OVER_1G 0x8320
4997#define MDIO_OVER_1G_DIGCTL_3_4 0x14
4998#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0
4999#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5
5000#define MDIO_OVER_1G_UP1 0x19
5001#define MDIO_OVER_1G_UP1_2_5G 0x0001
5002#define MDIO_OVER_1G_UP1_5G 0x0002
5003#define MDIO_OVER_1G_UP1_6G 0x0004
5004#define MDIO_OVER_1G_UP1_10G 0x0010
5005#define MDIO_OVER_1G_UP1_10GH 0x0008
5006#define MDIO_OVER_1G_UP1_12G 0x0020
5007#define MDIO_OVER_1G_UP1_12_5G 0x0040
5008#define MDIO_OVER_1G_UP1_13G 0x0080
5009#define MDIO_OVER_1G_UP1_15G 0x0100
5010#define MDIO_OVER_1G_UP1_16G 0x0200
5011#define MDIO_OVER_1G_UP2 0x1A
5012#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007
5013#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038
5014#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0
5015#define MDIO_OVER_1G_UP3 0x1B
5016#define MDIO_OVER_1G_UP3_HIGIG2 0x0001
5017#define MDIO_OVER_1G_LP_UP1 0x1C
5018#define MDIO_OVER_1G_LP_UP2 0x1D
5019#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff
5020#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780
5021#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7
5022#define MDIO_OVER_1G_LP_UP3 0x1E
5023
5024#define MDIO_REG_BANK_REMOTE_PHY 0x8330
5025#define MDIO_REMOTE_PHY_MISC_RX_STATUS 0x10
5026#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG 0x0010
5027#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG 0x0600
5028
5029#define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350
5030#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10
5031#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001
5032#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002
5033
5034#define MDIO_REG_BANK_CL73_USERB0 0x8370
5035#define MDIO_CL73_USERB0_CL73_UCTRL 0x10
5036#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL 0x0002
5037#define MDIO_CL73_USERB0_CL73_USTAT1 0x11
5038#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK 0x0100
5039#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37 0x0400
5040#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12
5041#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000
5042#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000
5043#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000
5044#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14
5045#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001
5046
5047#define MDIO_REG_BANK_AER_BLOCK 0xFFD0
5048#define MDIO_AER_BLOCK_AER_REG 0x1E
5049
5050#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0
5051#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10
5052#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040
5053#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000
5054#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000
5055#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040
5056#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100
5057#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200
5058#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000
5059#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000
5060#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000
5061#define MDIO_COMBO_IEEE0_MII_STATUS 0x11
5062#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004
5063#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020
5064#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14
5065#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020
5066#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040
5067#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180
5068#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000
5069#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080
5070#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100
5071#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180
5072#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000
5073#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15
5074#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000
5075#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000
5076#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180
5077#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000
5078#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180
5079#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040
5080#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020
5081/* When the link partner is in SGMII mode (bit 0 = 1), then
5082 bit 15 = link, bit 12 = duplex, bits 11:10 = speed, bit 14 = acknowledge.
5083 The other bits are reserved and should be zero */
5084#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
5085
5086
5087#define MDIO_PMA_DEVAD 0x1
5088/*ieee*/
5089#define MDIO_PMA_REG_CTRL 0x0
5090#define MDIO_PMA_REG_STATUS 0x1
5091#define MDIO_PMA_REG_10G_CTRL2 0x7
5092#define MDIO_PMA_REG_RX_SD 0xa
5093/*bcm*/
5094#define MDIO_PMA_REG_BCM_CTRL 0x0096
5095#define MDIO_PMA_REG_FEC_CTRL 0x00ab
5096#define MDIO_PMA_REG_RX_ALARM_CTRL 0x9000
5097#define MDIO_PMA_REG_LASI_CTRL 0x9002
5098#define MDIO_PMA_REG_RX_ALARM 0x9003
5099#define MDIO_PMA_REG_TX_ALARM 0x9004
5100#define MDIO_PMA_REG_LASI_STATUS 0x9005
5101#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800
5102#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808
5103#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809
5104#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02
5105#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09
5106#define MDIO_PMA_REG_MISC_CTRL 0xca0a
5107#define MDIO_PMA_REG_GEN_CTRL 0xca10
5108#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
5109#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
5110#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
5111#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
5112#define MDIO_PMA_REG_ROM_VER1 0xca19
5113#define MDIO_PMA_REG_ROM_VER2 0xca1a
5114#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
5115#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d
5116#define MDIO_PMA_REG_PLL_CTRL 0xca1e
5117#define MDIO_PMA_REG_MISC_CTRL0 0xca23
5118#define MDIO_PMA_REG_LRM_MODE 0xca3f
5119#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46
5120#define MDIO_PMA_REG_MISC_CTRL1 0xca85
5121
5122#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000
5123#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c
5124#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000
5125#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004
5126#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008
5127#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c
5128#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002
5129#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003
5130#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820
5131#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
5132#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01
5133#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05
5134
5135#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
5136#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
5137#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
5138#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309
5139#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
5140#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
5141#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
5142#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
5143
5144#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
5145#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
5146#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
5147
5148#define MDIO_PMA_REG_7101_RESET 0xc000
5149#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
5150#define MDIO_PMA_REG_7101_VER1 0xc026
5151#define MDIO_PMA_REG_7101_VER2 0xc027
5152
5153#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
5154#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
5155#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
5156#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
5157#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
5158#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
5159#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
5160
5161
5162#define MDIO_WIS_DEVAD 0x2
5163/*bcm*/
5164#define MDIO_WIS_REG_LASI_CNTL 0x9002
5165#define MDIO_WIS_REG_LASI_STATUS 0x9005
5166
5167#define MDIO_PCS_DEVAD 0x3
5168#define MDIO_PCS_REG_STATUS 0x0020
5169#define MDIO_PCS_REG_LASI_STATUS 0x9005
5170#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000
5171#define MDIO_PCS_REG_7101_SPI_MUX 0xD008
5172#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
5173#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
5174#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
5175#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
5176#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7)
5177#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
5178#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
5179
5180
5181#define MDIO_XS_DEVAD 0x4
5182#define MDIO_XS_PLL_SEQUENCER 0x8000
5183#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a
5184
5185#define MDIO_XS_8706_REG_BANK_RX0 0x80bc
5186#define MDIO_XS_8706_REG_BANK_RX1 0x80cc
5187#define MDIO_XS_8706_REG_BANK_RX2 0x80dc
5188#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
5189#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
5190
5191#define MDIO_AN_DEVAD 0x7
5192/*ieee*/
5193#define MDIO_AN_REG_CTRL 0x0000
5194#define MDIO_AN_REG_STATUS 0x0001
5195#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020
5196#define MDIO_AN_REG_ADV_PAUSE 0x0010
5197#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400
5198#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800
5199#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00
5200#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
5201#define MDIO_AN_REG_ADV 0x0011
5202#define MDIO_AN_REG_ADV2 0x0012
5203#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
5204#define MDIO_AN_REG_MASTER_STATUS 0x0021
5205/*bcm*/
5206#define MDIO_AN_REG_LINK_STATUS 0x8304
5207#define MDIO_AN_REG_CL37_CL73 0x8370
5208#define MDIO_AN_REG_CL37_AN 0xffe0
5209#define MDIO_AN_REG_CL37_FC_LD 0xffe4
5210#define MDIO_AN_REG_CL37_FC_LP 0xffe5
5211
5212#define MDIO_AN_REG_8073_2_5G 0x8329
5213
5214#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
5215#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
5216#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
5217#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
5218#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
5219#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
5220
5221#define IGU_FUNC_BASE 0x0400
5222
5223#define IGU_ADDR_MSIX 0x0000
5224#define IGU_ADDR_INT_ACK 0x0200
5225#define IGU_ADDR_PROD_UPD 0x0201
5226#define IGU_ADDR_ATTN_BITS_UPD 0x0202
5227#define IGU_ADDR_ATTN_BITS_SET 0x0203
5228#define IGU_ADDR_ATTN_BITS_CLR 0x0204
5229#define IGU_ADDR_COALESCE_NOW 0x0205
5230#define IGU_ADDR_SIMD_MASK 0x0206
5231#define IGU_ADDR_SIMD_NOMASK 0x0207
5232#define IGU_ADDR_MSI_CTL 0x0210
5233#define IGU_ADDR_MSI_ADDR_LO 0x0211
5234#define IGU_ADDR_MSI_ADDR_HI 0x0212
5235#define IGU_ADDR_MSI_DATA 0x0213
5236
5237#define IGU_INT_ENABLE 0
5238#define IGU_INT_DISABLE 1
5239#define IGU_INT_NOP 2
5240#define IGU_INT_NOP2 3
5241
5242#define COMMAND_REG_INT_ACK 0x0
5243#define COMMAND_REG_PROD_UPD 0x4
5244#define COMMAND_REG_ATTN_BITS_UPD 0x8
5245#define COMMAND_REG_ATTN_BITS_SET 0xc
5246#define COMMAND_REG_ATTN_BITS_CLR 0x10
5247#define COMMAND_REG_COALESCE_NOW 0x14
5248#define COMMAND_REG_SIMD_MASK 0x18
5249#define COMMAND_REG_SIMD_NOMASK 0x1c
5250
5251
5252#define IGU_MEM_BASE 0x0000
5253
5254#define IGU_MEM_MSIX_BASE 0x0000
5255#define IGU_MEM_MSIX_UPPER 0x007f
5256#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
5257
5258#define IGU_MEM_PBA_MSIX_BASE 0x0200
5259#define IGU_MEM_PBA_MSIX_UPPER 0x0200
5260
5261#define IGU_CMD_BACKWARD_COMP_PROD_UPD 0x0201
5262#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
5263
5264#define IGU_CMD_INT_ACK_BASE 0x0400
5265#define IGU_CMD_INT_ACK_UPPER\
5266 (IGU_CMD_INT_ACK_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
5267#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x04ff
5268
5269#define IGU_CMD_E2_PROD_UPD_BASE 0x0500
5270#define IGU_CMD_E2_PROD_UPD_UPPER\
5271 (IGU_CMD_E2_PROD_UPD_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
5272#define IGU_CMD_E2_PROD_UPD_RESERVED_UPPER 0x059f
5273
5274#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05a0
5275#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05a1
5276#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05a2
5277
5278#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05a3
5279#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05a4
5280#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05a5
5281#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
5282
5283#define IGU_REG_RESERVED_UPPER 0x05ff
5284
5285
5286#define CDU_REGION_NUMBER_XCM_AG 2
5287#define CDU_REGION_NUMBER_UCM_AG 4
5288
5289
5290/**
5291 * String-to-compress [31:8] = CID (all 24 bits)
5292 * String-to-compress [7:4] = Region
5293 * String-to-compress [3:0] = Type
5294 */
5295#define CDU_VALID_DATA(_cid, _region, _type)\
5296 (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
5297#define CDU_CRC8(_cid, _region, _type)\
5298 (calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
5299#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type)\
5300 (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
5301#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type)\
5302	(0x80 | (((_type) & 0xf) << 3) | ((_crc) & 0x7))
5303#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
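/* Illustrative worked example (not part of the original header), assuming a
 * hypothetical CID of 0x123 in the XCM aggregation region with type 0:
 *
 *	CDU_VALID_DATA(0x123, CDU_REGION_NUMBER_XCM_AG, 0)
 *		= (0x123 << 8) | ((2 & 0xf) << 4) | (0 & 0xf)
 *		= 0x12320
 *
 * i.e. bits [31:8] carry the CID, bits [7:4] the region and bits [3:0] the
 * type, matching the layout described above.
 */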
5304
5305/******************************************************************************
5306 * Description:
5307 *	Calculates CRC-8 on a 32-bit word: polynomial 0-1-2-8 (x^8 + x^2 + x + 1)
5308 *	Code was translated from Verilog.
5309 * Return: the resulting CRC-8 value
5310 *****************************************************************************/
5311static inline u8 calc_crc8(u32 data, u8 crc)
5312{
5313 u8 D[32];
5314 u8 NewCRC[8];
5315 u8 C[8];
5316 u8 crc_res;
5317 u8 i;
5318
5319	/* split the data into 32 bits */
5320 for (i = 0; i < 32; i++) {
5321 D[i] = (u8)(data & 1);
5322 data = data >> 1;
5323 }
5324
5325 /* split the crc into 8 bits */
5326 for (i = 0; i < 8; i++) {
5327 C[i] = crc & 1;
5328 crc = crc >> 1;
5329 }
5330
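	/* Parallel (unrolled) update: each new CRC bit is the XOR of the
	 * data bits D[] and current CRC bits C[] that feed it, equivalent
	 * to clocking all 32 data bits through the serial CRC */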
5331 NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
5332 D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
5333 C[6] ^ C[7];
5334 NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
5335 D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
5336 D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^
5337 C[6];
5338 NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
5339 D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
5340 C[0] ^ C[1] ^ C[4] ^ C[5];
5341 NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
5342 D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
5343 C[1] ^ C[2] ^ C[5] ^ C[6];
5344 NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
5345 D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
5346 C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
5347 NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
5348 D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
5349 C[3] ^ C[4] ^ C[7];
5350 NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
5351 D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
5352 C[5];
5353 NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
5354 D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
5355 C[6];
5356
5357 crc_res = 0;
5358 for (i = 0; i < 8; i++)
5359 crc_res |= (NewCRC[i] << i);
5360
5361 return crc_res;
5362}
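/* Illustrative only (not part of the original header): a minimal sketch of how
 * the CDU macros above and calc_crc8() combine to produce the type-A reserved
 * (validation) byte for a context. The helper name and its use are
 * hypothetical; the packing, the 0xff seed and the bit-7 marker are taken from
 * CDU_VALID_DATA()/CDU_CRC8()/CDU_RSRVD_VALUE_TYPE_A() above.
 */
static inline u8 bnx2x_example_cdu_validation_byte(u32 cid, u8 region, u8 type)
{
	/* pack CID, region and type into the "string-to-compress" word */
	u32 val = CDU_VALID_DATA(cid, region, type);

	/* CRC-8 over the packed word, seeded with 0xff, with bit 7 set to
	 * mark the context as valid - same result as
	 * CDU_RSRVD_VALUE_TYPE_A(cid, region, type) */
	return 0x80 | (calc_crc8(val, 0xff) & 0x7f);
}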
5363
5364